diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 06b820aa845d..1518d641836e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,9 +3,9 @@ # For more details, see https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners -/content/manuals/build/ @crazy-max @dvdksn +/content/manuals/build/ @crazy-max @ArthurFlag -/content/manuals/build-cloud/ @dvdksn +/content/manuals/build-cloud/ @crazy-max @craig-osterhout /content/manuals/compose/ @aevesdocker @@ -19,21 +19,15 @@ /content/manuals/docker-hub/ @craig-osterhout -/content/manuals/engine/ @dvdksn +/content/manuals/engine/ @thaJeztah @ArthurFlag -/content/reference/api/engine/ @dvdksn +/content/reference/api/engine/ @thaJeztah @ArthurFlag -/content/reference/cli/ @dvdksn +/content/reference/cli/ @thaJeztah @ArthurFlag /content/manuals/subscription/ @sarahsanders-docker -/content/manuals/security/ @aevesdocker - -/content/manuals/trusted-content/ @craig-osterhout - -/content/manuals/docker-hub/official_images/ @craig-osterhout - -/content/manuals/registry/ @craig-osterhout +/content/manuals/security/ @aevesdocker @sarahsanders-docker /content/manuals/admin/ @sarahsanders-docker @@ -41,6 +35,10 @@ /content/manuals/accounts/ @sarahsanders-docker -/hack @dvdksn +/content/manuals/ai/ @ArthurFlag + +/_vendor @sarahsanders-docker @ArthurFlag + +/content/manuals/cloud/ @craig-osterhout -/_vendor @dvdksn +/content/manuals/dhi/ @craig-osterhout diff --git a/.github/instructions/styleguide-instructions.md b/.github/instructions/styleguide-instructions.md new file mode 100644 index 000000000000..3e719392472c --- /dev/null +++ b/.github/instructions/styleguide-instructions.md @@ -0,0 +1,111 @@ +--- +applyTo: '**/*.md' +--- +# Documentation Writing Instructions + +These are our documentation writing style guidelines. + +## General Style tips + +* Get to the point fast. +* Talk like a person. +* Simpler is better. +* Be brief. Give customers just enough information to make decisions confidently. Prune every excess word. +* We use Hugo to generate our docs. + +## Grammar + +* Use present tense verbs (is, open) instead of past tense (was, opened). +* Write factual statements and direct commands. Avoid hypotheticals like "could" or "would". +* Use active voice where the subject performs the action. +* Write in second person (you) to speak directly to readers. +* Use gender-neutral language. +* Avoid multiple -ing words that can create ambiguity. +* Keep prepositional phrases simple and clear. +* Place modifiers close to what they modify. + +## Capitalization + +* Use sentence-style capitalization for everything except proper nouns. +* Always capitalize proper nouns. +* Don’t capitalize the spelled-out form of an acronym unless it's a proper noun. +* In programming languages, follow the traditional capitalization of keywords and other special terms. +* Don't use all uppercase for emphasis. + +## Numbers + +* Spell out numbers for zero through nine, unless space is limited. Use numerals for 10 and above. +* Spell out numbers at the beginning of a sentence. +* Spell out ordinal numbers such as first, second, and third. Don't add -ly to form adverbs from ordinal numbers. + +## Punctuation + +* Use short, simple sentences. +* End all sentences with a period. +* Use one space after punctuation marks. +* After a colon, capitalize only proper nouns. +* Avoid semicolons - use separate sentences instead. +* Use question marks sparingly. 
+* Don't use slashes (/) - use "or" instead.
+
+## Text formatting
+
+* UI elements, like menu items, dialog names, and names of text boxes, should be in bold text.
+* Use code style for:
+  * Code elements, like method names, property names, and language keywords.
+  * SQL commands.
+  * Command-line commands.
+  * Database table and column names.
+  * Resource names (like virtual machine names) that shouldn't be localized.
+  * URLs that you don't want to be selectable.
+* For code placeholders, if you want users to replace part of an input string with their own values, use angle brackets (less than < and greater than > characters) on that placeholder text.
+* Don't apply an inline style like italic, bold, or inline code style to headings.
+
+## Alerts
+
+* Alerts are a Markdown extension to create block quotes that render with colors and icons that indicate the significance of the content. The following alert types are supported:
+
+  * `[!NOTE]` Information the user should notice even if skimming.
+  * `[!TIP]` Optional information to help a user be more successful.
+  * `[!IMPORTANT]` Essential information required for user success.
+  * `[!CAUTION]` Negative potential consequences of an action.
+  * `[!WARNING]` Dangerous certain consequences of an action.
+
+## Links
+
+* Links to other documentation articles should be relative, not absolute. Include the `.md` suffix.
+* Links to bookmarks within the same article should be relative and start with `#`.
+* Link descriptions should be descriptive and make sense on their own. Don't use "click here", "this link", or "here".
+
+## Images
+
+* Use images only when they add value.
+* Images have a descriptive and meaningful alt text that starts with "Screenshot showing" and ends with ".".
+* Videos have a descriptive and meaningful alt text or title that starts with "Video showing" and ends with ".".
+
+## Numbered steps
+
+* Write complete sentences with capitalization and periods.
+* Use imperative verbs.
+* Clearly indicate where actions take place (UI location).
+* For single steps, use a bullet instead of a number.
+* When allowed, use angle brackets for menu sequences (File > Open).
+* When writing ordered lists, only use 1's.
+
+## Terminology
+
+* Use "Select" instead of "Click" for UI elements like buttons, menu items, links, dropdowns, and checkboxes.
+* Use "might" instead of "may" for conditional statements.
+* Avoid Latin abbreviations like "e.g.". Use "for example" instead.
+* Use the verb "to enable" instead of "to allow" unless you're referring to permissions.
+* Follow the terms and capitalization guidelines in #fetch [VS Code docs wiki](https://github.com/microsoft/vscode-docs/wiki/VS-Code-glossary).
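+
+For illustration, a short passage that follows several of these rules - bold UI labels, an angle-bracket placeholder, and a note alert - might look like this (the container name is a hypothetical example):
+
+```markdown
+To export the report, select **File** > **Export** and enter a name for the archive.
+
+Run the following command, replacing `<FILE_NAME>` with your own file name:
+
+docker export --output <FILE_NAME>.tar my-container
+
+> [!NOTE]
+> The exported archive doesn't include the contents of mounted volumes.
+```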
+
+
+## Complete style guide
+
+Find all the details of the style guide in these files:
+
+- `./content/contribute/style/grammar.md` – Grammar rules
+- `./content/contribute/style/formatting.md` – Formatting rules
+- `./content/contribute/style/recommended-words.md` – Approved words and phrasing
+- `./content/contribute/style/voice-tone.md` – Voice and tone guidance
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 11cef0e77f6d..858575385b56 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -1,3 +1,9 @@
+area/ai:
+  - changed-files:
+      - any-glob-to-any-file:
+          - content/manuals/ai/**
+          - content/reference/cli/model/**
+
 area/release:
   - changed-files:
       - any-glob-to-any-file:
@@ -44,6 +50,11 @@ area/build-cloud:
       - any-glob-to-any-file:
           - content/manuals/build-cloud/**
 
+area/cloud:
+  - changed-files:
+      - any-glob-to-any-file:
+          - content/manuals/cloud/**
+
 area/compose:
   - changed-files:
       - any-glob-to-any-file:
@@ -56,6 +67,11 @@ area/desktop:
       - any-glob-to-any-file:
           - content/manuals/desktop/**
 
+area/dhi:
+  - changed-files:
+      - any-glob-to-any-file:
+          - content/manuals/dhi/**
+
 area/engine:
   - changed-files:
       - any-glob-to-any-file:
@@ -173,7 +189,6 @@ hugo:
       - hugo_stats.json
       - i18n/**
       - layouts/**
-      - postcss.config.js
       - static/**
       - tailwind.config.js
 
diff --git a/.github/prompts/freshness-tier1.prompt.md b/.github/prompts/freshness-tier1.prompt.md
new file mode 100644
index 000000000000..a26490a177dd
--- /dev/null
+++ b/.github/prompts/freshness-tier1.prompt.md
@@ -0,0 +1,16 @@
+---
+mode: 'edit'
+---
+
+Imagine you're an experienced technical writer. You need to review content for
+how fresh and up to date it is. Apply the following:
+
+1. Fix spelling errors and typos.
+2. Verify that the markdown structure conforms to common markdown standards.
+3. Ensure the content follows our [style guide](../instructions/styleguide-instructions.md).
+4. Make sure the titles on the page provide clear context about the content (for an improved search experience).
+5. Ensure all the components are formatted correctly.
+6. Improve the SEO keywords.
+7. If you find numbered lists, make sure their numbering only uses 1's.
+
+Do your best and don't be lazy.
\ No newline at end of file
diff --git a/.github/prompts/freshness-tier2.prompt.md b/.github/prompts/freshness-tier2.prompt.md
new file mode 100644
index 000000000000..e2935f12e4af
--- /dev/null
+++ b/.github/prompts/freshness-tier2.prompt.md
@@ -0,0 +1,22 @@
+---
+mode: 'edit'
+---
+
+Imagine you're an experienced technical writer. You need to review content for
+how fresh and up to date it is. Apply the following:
+
+1. Improve the presentational layer - components, splitting up the page into smaller pages.
+   Consider the following:
+
+   1. Can you use tabs to display multiple variants of the same steps?
+   2. Can you make a key item of information stand out with a call-out?
+   3. Can you reduce a large amount of text to a series of bullet points?
+   4. Are there other code components you could use?
+2. Check if any operating systems or package versions mentioned are still current and supported.
+3. Check the accuracy of the content.
+4. If appropriate, follow the document from start to finish to see if the steps make sense in sequence.
+5. Try to add some helpful next steps to the end of the document, but only if there is no *Next steps* or *Related pages* section already.
+6. Try to clarify, shorten, or improve the efficiency of some sentences.
+7. Check for LLM readability.
+
+Do your best and don't be lazy.
\ No newline at end of file
diff --git a/.github/prompts/review.prompt.md b/.github/prompts/review.prompt.md
new file mode 100644
index 000000000000..47a39e8e14c5
--- /dev/null
+++ b/.github/prompts/review.prompt.md
@@ -0,0 +1,7 @@
+---
+mode: edit
+description: You are a technical writer reviewing an article for clarity, conciseness, and adherence to the documentation writing style guidelines.
+---
+Review the article for clarity, conciseness, and adherence to our documentation [style guidelines](../instructions/styleguide-instructions.md).
+
+Provide concrete and practical suggestions for improvement.
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index b62404c71e05..fcafdecc7542 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -36,9 +36,6 @@ jobs:
         files: |
           docker-bake.hcl
         targets: releaser-build
-        set: |
-          *.cache-from=type=gha,scope=releaser
-          *.cache-to=type=gha,scope=releaser,mode=max
 
   build:
     runs-on: ubuntu-24.04
@@ -59,9 +56,6 @@ jobs:
         files: |
           docker-bake.hcl
         targets: release
-        set: |
-          *.cache-from=type=gha,scope=build
-          *.cache-to=type=gha,scope=build,mode=max
       -
         name: Check Cloudfront config
         uses: docker/bake-action@v6
@@ -74,17 +68,6 @@ jobs:
           AWS_CLOUDFRONT_ID: 0123456789ABCD
           AWS_LAMBDA_FUNCTION: DockerDocsRedirectFunction-dummy
 
-  vale:
-    if: ${{ github.event_name == 'pull_request' }}
-    runs-on: ubuntu-24.04
-    steps:
-      - uses: actions/checkout@v4
-      - uses: errata-ai/vale-action@reviewdog
-        env:
-          PIP_BREAK_SYSTEM_PACKAGES: 1
-        with:
-          files: content
-
   validate:
     runs-on: ubuntu-24.04
     strategy:
@@ -92,12 +75,17 @@ jobs:
       matrix:
         target:
           - lint
+          - vale
           - test
          - unused-media
          - test-go-redirects
          - dockerfile-lint
          - path-warnings
+          - validate-vendor
     steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v4
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -105,11 +93,18 @@ jobs:
         name: Validate
         uses: docker/bake-action@v6
         with:
+          source: .
           files: |
             docker-bake.hcl
           targets: ${{ matrix.target }}
-          set: |
-            *.args.BUILDKIT_CONTEXT_KEEP_GIT_DIR=1
-            *.cache-to=type=gha,scope=validate-${{ matrix.target }},mode=max
-            *.cache-from=type=gha,scope=validate-${{ matrix.target }}
-            *.cache-from=type=gha,scope=build
+      -
+        name: Install reviewdog
+        if: ${{ matrix.target == 'vale' && github.event_name == 'pull_request' }}
+        uses: reviewdog/action-setup@e04ffabe3898a0af8d0fb1af00c188831c4b5893 # v1.3.2
+      -
+        name: Run reviewdog for vale
+        if: ${{ matrix.target == 'vale' && github.event_name == 'pull_request' }}
+        run: |
+          cat ./tmp/vale.out | reviewdog -f=rdjsonl -name=vale -reporter=github-pr-annotations -fail-on-error=false -filter-mode=added -level=info
+        env:
+          REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 000000000000..fa0d60c3b6de
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,100 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+# +name: "CodeQL Advanced" + +on: + push: + branches: [ "nameless" ] + pull_request: + branches: [ "nameless" ] + schedule: + - cron: '40 16 * * 4' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: go + build-mode: autobuild + - language: javascript-typescript + build-mode: none + # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. + # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Add any setup steps before running the `github/codeql-action/init` action. + # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). This is typically only required for manual builds. + # - name: Setup runtime (example) + # uses: actions/setup-example@v1 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 8ce0b6285e80..793149808716 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -1,5 +1,5 @@ name: deploy - +# Deploys the Docker Docs website when merging to the `main` branch. concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -8,9 +8,8 @@ on: workflow_dispatch: push: branches: - - lab - main - - published + - lab env: # Use edge release of buildx (latest RC, fallback to latest stable) @@ -22,6 +21,8 @@ permissions: id-token: write contents: read +# The `main` branch is deployed to the production environment. +# The `lab` branch is deployed to a separate environment for testing purposes. jobs: publish: runs-on: ubuntu-24.04 @@ -30,26 +31,16 @@ jobs: - name: Prepare run: | - HUGO_ENV=development DOCS_AWS_REGION=us-east-1 + HUGO_ENV=production if [ "${{ github.ref }}" = "refs/heads/main" ]; then - HUGO_ENV=staging - DOCS_URL="https://docs-stage.docker.com" - DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/stage-docs-docs.docker.com-20220818202135984800000001" - DOCS_S3_BUCKET="stage-docs-docs.docker.com" - DOCS_S3_CONFIG="s3-config.json" - DOCS_CLOUDFRONT_ID="E1R7CSW3F0X4H8" - DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-stage" - DOCS_SLACK_MSG="Successfully deployed docs-stage from main branch. $DOCS_URL" - elif [ "${{ github.ref }}" = "refs/heads/published" ]; then - HUGO_ENV=production DOCS_URL="https://docs.docker.com" DOCS_AWS_IAM_ROLE="arn:aws:iam::710015040892:role/prod-docs-docs.docker.com-20220818202218674300000001" DOCS_S3_BUCKET="prod-docs-docs.docker.com" DOCS_S3_CONFIG="s3-config.json" DOCS_CLOUDFRONT_ID="E228TTN20HNU8F" DOCS_LAMBDA_FUNCTION_REDIRECTS="DockerDocsRedirectFunction-prod" - DOCS_SLACK_MSG="Successfully deployed docs from published branch. $DOCS_URL" + DOCS_SLACK_MSG="Successfully deployed docs from the main branch. 
$DOCS_URL" elif [ "${{ github.ref }}" = "refs/heads/lab" ]; then HUGO_ENV=lab DOCS_URL="https://docs-labs.docker.com" @@ -96,9 +87,6 @@ jobs: files: | docker-bake.hcl targets: release - set: | - *.cache-from=type=gha,scope=deploy-${{ env.BRANCH_NAME }} - *.cache-to=type=gha,scope=deploy-${{ env.BRANCH_NAME }},mode=max provenance: false - name: Configure AWS Credentials @@ -134,8 +122,6 @@ jobs: files: | docker-bake.hcl targets: aws-s3-update-config - set: | - *.cache-from=type=gha,scope=releaser env: AWS_REGION: ${{ env.DOCS_AWS_REGION }} AWS_S3_BUCKET: ${{ env.DOCS_S3_BUCKET }} diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml deleted file mode 100644 index c4cb27d9fe51..000000000000 --- a/.github/workflows/merge.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: merge - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -# open or update publishing PR when there is a push to main -on: - workflow_dispatch: - push: - branches: - - main - -jobs: - main-to-published: - runs-on: ubuntu-24.04 - if: github.repository_owner == 'docker' - steps: - - uses: actions/checkout@v4 - with: - ref: published - - name: Reset published branch - run: | - git fetch origin main:main - git reset --hard main - - name: Create Pull Request - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f - with: - delete-branch: false - branch: published-update - commit-message: publish updates from main - labels: area/release - title: publish updates from main - body: | - Automated pull request for publishing docs updates. diff --git a/.github/workflows/validate-upstream.yml b/.github/workflows/validate-upstream.yml index 3f013523ca29..6a9c2903e3c2 100644 --- a/.github/workflows/validate-upstream.yml +++ b/.github/workflows/validate-upstream.yml @@ -96,9 +96,7 @@ jobs: files: | docker-bake.hcl targets: validate-upstream - set: | - *.cache-from=type=gha,scope=docs-upstream - *.cache-to=type=gha,scope=docs-upstream + provenance: false env: UPSTREAM_MODULE_NAME: ${{ inputs.module-name }} UPSTREAM_REPO: ${{ github.repository }} diff --git a/.gitignore b/.gitignore index 76d3b8dadbe0..fb19501a8140 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,12 @@ +.hugo_build.lock +.idea/ +.vscode/mcp.json +.vscode/settings.json +.vscode/tasks.json **/.DS_Store **/desktop.ini -.vscode node_modules -.hugo_build.lock -resources public -tmp +resources static/pagefind +tmp diff --git a/.htmltest.yml b/.htmltest.yml index e7cb321e1bec..1be65b82355a 100644 --- a/.htmltest.yml +++ b/.htmltest.yml @@ -9,6 +9,7 @@ IgnoreDirectoryMissingTrailingSlash: true IgnoreURLs: - "^/reference/api/hub/.*$" - "^/reference/api/engine/v.+/#.*$" +- "^/reference/api/registry/.*$" IgnoreDirs: - "registry/configuration" - "compose/compose-file" # temporarily ignore until upstream is fixed diff --git a/.markdownlint.json b/.markdownlint.json index 58ab5995dd85..86037b36a7b8 100644 --- a/.markdownlint.json +++ b/.markdownlint.json @@ -13,7 +13,7 @@ "no-space-in-code": true, "no-space-in-links": true, "no-empty-links": true, - "ol-prefix": {"style": "ordered"}, + "ol-prefix": {"style": "one_or_ordered"}, "no-reversed-links": true, "reference-links-images": { "shortcut_syntax": false diff --git a/.vale-rdjsonl.tmpl b/.vale-rdjsonl.tmpl new file mode 100644 index 000000000000..662f973385c2 --- /dev/null +++ b/.vale-rdjsonl.tmpl @@ -0,0 +1,31 @@ +{{- /* Range over the linted files */ -}} + +{{- range .Files}} + +{{- $path := .Path -}} + +{{- /* Range over the file's alerts */ -}} + +{{- range 
.Alerts -}}
+
+{{- $error := "" -}}
+{{- if eq .Severity "error" -}}
+  {{- $error = "ERROR" -}}
+{{- else if eq .Severity "warning" -}}
+  {{- $error = "WARNING" -}}
+{{- else -}}
+  {{- $error = "INFO" -}}
+{{- end}}
+
+{{- /* Variables setup */ -}}
+
+{{- $line := printf "%d" .Line -}}
+{{- $col := printf "%d" (index .Span 0) -}}
+{{- $check := printf "%s" .Check -}}
+{{- $message := printf "%s" .Message -}}
+
+{{- /* Output */ -}}
+
+{"message": "[{{ $check }}] {{ $message | jsonEscape }}", "location": {"path": "{{ $path }}", "range": {"start": {"line": {{ $line }}, "column": {{ $col }}}}}, "severity": "{{ $error }}"}
+{{end -}}
+{{end -}}
diff --git a/.vale.ini b/.vale.ini
index 710e13b2ff2f..b61d09a14862 100644
--- a/.vale.ini
+++ b/.vale.ini
@@ -15,3 +15,10 @@ TokenIgnores = ({{[%<] .* [%>]}}.*?{{[%<] ?/.* [%>]}}), \
 # Exclude `{{< myshortcode `This is some HTML, ... >}}`
 BlockIgnores = (?sm)^({{[%<] \w+ [^{]*?\s[%>]}})\n$, \
 (?s) *({{< highlight [^>]* ?>}}.*?{{< ?/ ?highlight >}})
+
+# Disable rules for generated content
+# Content is checked upstream
+[**/{model-cli/docs/reference,content/reference/cli/docker/model}/**.md]
+BasedOnStyles = Vale
+Vale.Spelling = NO
+Vale.Terms = NO
diff --git a/.vscode/docker.code-snippets b/.vscode/docker.code-snippets
new file mode 100644
index 000000000000..3be817d524d4
--- /dev/null
+++ b/.vscode/docker.code-snippets
@@ -0,0 +1,57 @@
+{
+  "Insert Hugo Note Admonition": {
+    "prefix": ["admonition", "note"],
+    "body": ["> [!NOTE]", "> $1"],
+    "description": "Insert a Hugo note admonition",
+  },
+  "Insert Hugo Important Admonition": {
+    "prefix": ["admonition", "important"],
+    "body": ["> [!IMPORTANT]", "> $1"],
+    "description": "Insert a Hugo important admonition",
+  },
+  "Insert Hugo Warning Admonition": {
+    "prefix": ["admonition", "warning"],
+    "body": ["> [!WARNING]", "> $1"],
+    "description": "Insert a Hugo warning admonition",
+  },
+  "Insert Hugo Tip Admonition": {
+    "prefix": ["admonition", "tip"],
+    "body": ["> [!TIP]", "> $1"],
+    "description": "Insert a Hugo tip admonition",
+  },
+  "Insert Hugo Tabs": {
+    "prefix": ["admonition", "tabs"],
+    "body": [
+      "",
+      "{{< tabs group=\"$1\" >}}",
+      "{{< tab name=\"$2\">}}",
+      "",
+      "$3",
+      "",
+      "{{< /tab >}}",
+      "{{< tab name=\"$4\">}}",
+      "",
+      "$5",
+      "",
+      "{{< /tab >}}",
+      "{{< /tabs >}}",
+      "",
+    ],
+    "description": "Insert a Hugo tabs block with two tabs and snippet stops for names and content",
+  },
+  "Insert Hugo code block (no title)": {
+    "prefix": ["codeblock", "block"],
+    "body": ["```${1:json}", "$2", "```", ""],
+    "description": "Insert a Hugo code block without a title",
+  },
+  "Insert Hugo code block (with title)": {
+    "prefix": ["codeblock", "codettl", "block"],
+    "body": ["```${1:json} {title=\"$2\"}", "$3", "```", ""],
+    "description": "Insert a Hugo code block with a title",
+  },
+  "Insert a Button": {
+    "prefix": ["button"],
+    "body": ["{{< button url=\"$1\" text=\"$2\" >}}"],
+    "description": "Insert a Hugo button",
+  },
+}
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2ce05f85a896..e1cef1153c3c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -7,7 +7,7 @@ Our style guide and instructions on using our page templates and components is
 available in the [contribution section](https://docs.docker.com/contribute/) on
 the website.
 
-The following guidelines describe the ways in which you can contribute to the
+The following guidelines describe how to contribute to the
 Docker documentation at <https://docs.docker.com/>, and how to get started.
 ## Reporting issues
@@ -91,6 +91,9 @@ To stop the development server:
 
 1. In your terminal, press `<Ctrl+C>` to exit the file watch mode of Compose.
 2. Stop the Compose service with the `docker compose down` command.
 
+> [!NOTE]
+> Alternatively, if you have installed Hugo, you can serve the site locally with `hugo serve`.
+
 ### Testing
 
 Before you push your changes and open a pull request, we recommend that you
diff --git a/Dockerfile b/Dockerfile
index cfe29a65fbf6..60edca09d4ad 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,8 +2,9 @@
 # check=skip=InvalidBaseImagePlatform
 
 ARG ALPINE_VERSION=3.21
-ARG GO_VERSION=1.23
+ARG GO_VERSION=1.24
 ARG HTMLTEST_VERSION=0.17.0
+ARG VALE_VERSION=3.11.2
 ARG HUGO_VERSION=0.141.0
 ARG NODE_VERSION=22
 ARG PAGEFIND_VERSION=1.3.0
@@ -14,7 +15,8 @@ RUN apk add --no-cache \
     git \
     nodejs \
     npm \
-    gcompat
+    gcompat \
+    rsync
 
 # npm downloads Node.js dependencies
 FROM base AS npm
@@ -66,6 +68,23 @@ COPY --from=build /project/public ./public
 ADD .htmltest.yml .htmltest.yml
 RUN htmltest
 
+# vale
+FROM jdkato/vale:v${VALE_VERSION} AS vale-run
+WORKDIR /src
+ARG GITHUB_ACTIONS
+RUN --mount=type=bind,target=.,rw <<EOT
+vale --output=.vale-rdjsonl.tmpl content > tmp/vale.out
+EOT
+
+FROM build-base AS validate-vendor
+RUN --mount=type=bind,target=.,rw <<EOT
+make vendor
+if [ -n "$(git status --porcelain -- go.mod go.sum _vendor)" ]; then
+  echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"'
+  git status --porcelain -- go.mod go.sum _vendor
+  exit 1
+fi
+EOT
 
 # build-upstream builds an upstream project with a replacement module
 FROM build-base AS build-upstream
diff --git a/README.md b/README.md
index 39500fb38823..d13785067dd8 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,32 @@
 # Docs @ Docker
 
-Welcome to Docker Documentation
+Welcome to Docker Documentation
 
 Welcome to the Docker Documentation repository. This is the source for
 [https://docs.docker.com/](https://docs.docker.com/).
 
-Feel free to send us pull requests and file issues. Our docs are completely
+Feel free to send us pull requests and issues. Our docs are completely
 open source, and we deeply appreciate contributions from the Docker
 community!
 
 ## Provide feedback
 
-We’d love to hear your feedback. Please file documentation issues only in the
-Docs GitHub repository. You can file a new issue to suggest improvements or if
-you see any errors in the existing documentation.
+We’d love to hear your feedback! To submit feedback:
+- Click **[New issue](https://github.com/docker/docs/issues/new)** on the docs repository, or
+- Click **Request changes** in the right column of every page on
+  [docs.docker.com](https://docs.docker.com/), or
+- Click **Give feedback** on the side of every page in the docs.
 
-Before submitting a new issue, check whether the issue has already been
-reported. You can join the discussion using an emoji, or by adding a comment to
-an existing issue. If possible, we recommend that you suggest a fix to the issue
-by creating a pull request.
-
-You can ask general questions and get community support through the [Docker
-Community Slack](https://dockr.ly/comm-slack). Personalized support is available
+To get community support, use the [Docker Community Slack](https://dockr.ly/comm-slack). Personalized support is available
 through the Docker Pro, Team, and Business subscriptions. See [Docker
 Pricing](https://www.docker.com/pricing) for details.
 
 If you have an idea for a new feature or behavior change in a specific aspect of
-Docker or have found a product bug, file that issue in the project's code
+Docker or have found a product bug, file an issue in the project's
 repository.
 
-We've made it easy for you to file new issues.
- -- Click **[New issue](https://github.com/docker/docs/issues/new)** on the docs repository and fill in the details, or -- Click **Request docs changes** in the right column of every page on - [docs.docker.com](https://docs.docker.com/) and add the details, or - - ![Request changes link](/static/assets/images/docs-site-feedback.png) - -- Click the **Give feedback** link on the side of every page in the docs. - - ![Docs feedback on each page](/static/assets/images/feedback-widget.png) - ## Contribute to Docker docs -We value your contribution. We want to make it as easy as possible to submit -your contributions to the Docker docs repository. Changes to the docs are -handled through pull requests against the `main` branch. To learn how to -contribute, see [CONTRIBUTING.md](CONTRIBUTING.md). +See [CONTRIBUTING.md](CONTRIBUTING.md). ## Copyright and license diff --git a/_vale/.vale-config/0-Hugo.ini b/_vale/.vale-config/0-Hugo.ini deleted file mode 100644 index 4347ca9e902a..000000000000 --- a/_vale/.vale-config/0-Hugo.ini +++ /dev/null @@ -1,10 +0,0 @@ -[*.md] -# Exclude `{{< ... >}}`, `{{% ... %}}`, [Who]({{< ... >}}) -TokenIgnores = ({{[%<] .* [%>]}}.*?{{[%<] ?/.* [%>]}}), \ -(\[.+\]\({{< .+ >}}\)), \ -[^\S\r\n]({{[%<] \w+ .+ [%>]}})\s, \ -[^\S\r\n]({{[%<](?:/\*) .* (?:\*/)[%>]}})\s - -# Exclude `{{< myshortcode `This is some HTML, ... >}}` -BlockIgnores = (?sm)^({{[%<] \w+ [^{]*?\s[%>]}})\n$, \ -(?s) *({{< highlight [^>]* ?>}}.*?{{< ?/ ?highlight >}}) diff --git a/_vale/Docker/Acronyms.yml b/_vale/Docker/Acronyms.yml index 866d870d3490..d6d9fdde6bde 100644 --- a/_vale/Docker/Acronyms.yml +++ b/_vale/Docker/Acronyms.yml @@ -8,16 +8,20 @@ first: '\b([A-Z]{2,5})\b' second: '(?:\b[A-Z][a-z]+ )+\(([A-Z]{2,5})s?\)' # ... with the exception of these: exceptions: + - ACH - AGPL - AI - API - ARM + - ARP - ASP - AUFS - AWS + - BGP # Border Gateway Protocol - BIOS - BPF - BSD + - CDI - CFS - CI - CIDR @@ -26,13 +30,16 @@ exceptions: - CNCF - CORS - CPU + - CSI - CSS - CSV - CUDA - CVE + - DAD - DCT - DEBUG - DHCP + - DMR - DNS - DOM - DPI @@ -48,6 +55,7 @@ exceptions: - GDB - GET - GHSA + - GNOME - GNU - GPG - GPL @@ -55,6 +63,7 @@ exceptions: - GRUB - GTK - GUI + - GUID - HEAD - HTML - HTTP @@ -66,13 +75,20 @@ exceptions: - IP - IPAM - IPC + - IT - JAR + - JIT - JSON - JSX + - KDE - LESS - LLDB + - LLM - LTS - MAC + - MATE + - mcp + - MCP - MDM - MDN - MSI @@ -91,10 +107,11 @@ exceptions: - PATH - PDF - PEM - - PID - PHP + - PID - POSIX - POST + - QA - QEMU - RAM - REPL @@ -109,6 +126,7 @@ exceptions: - SCIM - SCM - SCSS + - SCTP - SDK - SLES - SLSA @@ -129,6 +147,7 @@ exceptions: - TTY - TXT - UDP + - UI - URI - URL - USB @@ -137,6 +156,7 @@ exceptions: - UTS - UUID - VAT + - VDI - VIP - VLAN - VM diff --git a/_vale/Docker/Forbidden.yml b/_vale/Docker/Forbidden.yml new file mode 100644 index 000000000000..d8b7a37ae8c9 --- /dev/null +++ b/_vale/Docker/Forbidden.yml @@ -0,0 +1,6 @@ +extends: substitution +message: "Use '%s' instead of '%s'." 
+level: error +ignorecase: false +swap: + Docker CE: Docker Engine diff --git a/_vale/config/vocabularies/Docker/accept.txt b/_vale/config/vocabularies/Docker/accept.txt index c07c79a1cd7f..ece3d737b16a 100644 --- a/_vale/config/vocabularies/Docker/accept.txt +++ b/_vale/config/vocabularies/Docker/accept.txt @@ -1,27 +1,41 @@ (?i)[A-Z]{2,}'?s +Adreno +Aleksandrov Amazon Anchore Apple Artifactory Azure +bootup Btrfs BuildKit BusyBox CentOS Ceph +cgroup Chrome Chrome DevTools +Citrix CloudFront Codefresh Codespaces -CouchDB +config +containerd Couchbase +CouchDB +datacenter Datadog Ddosify Debootstrap +deprovisioning +deserialization +deserialize Dev Dev Environments? +Dex +displayName Django +DMR Docker Build Cloud Docker Business Docker Dasboard @@ -31,70 +45,129 @@ Docker Extension Docker Hub Docker Scout Docker Team -Docker's Docker-Sponsored Open Source +Docker's Dockerfile +dockerignore Dockerize Dockerizing Entra +EPERM Ethernet Fargate Fedora +firewalld Flink +fluentd +g?libc GeoNetwork +GGUF Git GitHub( Actions)? Google Grafana Gravatar +gRPC HyperKit -IPv[46] -IPvlan +inferencing +inotify Intel Intune -JFrog +iptables +IPv[46] +IPvlan +isort Jamf JetBrains +JFrog +JUnit Kerberos Kitematic +Kubeadm +kubectl +kubefwd +kubelet Kubernetes -Laravel Laradock +Laravel +libseccomp Linux LinuxKit Logstash +lookup Mac +macOS +macvlan Mail(chimp|gun) +mfsymlinks Microsoft +minikube +monorepos? +musl MySQL -NFSv\d +nameserver +namespace +namespacing +netfilter +netlabel Netplan +NFSv\d Nginx +npm +Nutanix Nuxeo +NVIDIA OAuth -OTel Okta -PKG +Ollama +osquery +osxfs +OTel Paketo +pgAdmin +PKG Postgres PowerShell Python +Qualcomm +rollback +rootful +runc +Ryuk S3 -SQLite +scrollable Slack +snapshotters? Snyk Solr SonarQube +SQLite +stdin +stdout +subfolder Syft +syntaxes Sysbox +sysctls Sysdig +systemd Testcontainers +tmpfs Traefik +Trixie Ubuntu +ufw +uid +umask Unix +unmanaged VMware +vpnkit +vSphere +VSCode Wasm Windows +windowsfilter WireMock Xdebug Zscaler @@ -130,6 +203,7 @@ Zsh [Pp]rocfs [Pp]roxied [Pp]roxying +[pP]yright [Rr]eal-time [Rr]egex(es)? [Rr]untimes? @@ -145,6 +219,7 @@ Zsh [Ss]warm [Ss]yscalls? [Ss]ysfs +[Tt]eardown [Tt]oolchains? [Uu]narchived? [Uu]ngated @@ -154,51 +229,4 @@ Zsh [Vv]irtiofs [Vv]irtualize [Ww]alkthrough -bootup -cgroup -config -containerd -datacenter -deprovisioning -deserialization -deserialize -displayName -dockerignore -firewalld -g?libc -gRPC -inotify -iptables -kubectl -kubefwd -kubelet -lookup -macOS -macvlan -mfsymlinks -minikube -monorepos? -musl -nameserver -namespace -namespacing -npm -osquery -osxfs -pgAdmin -rollback -rootful -runc -snapshotters? -stdin -stdout -syntaxes -sysctls -systemd -tmpfs -ufw -uid -umask -vSphere -vpnkit -windowsfilter + diff --git a/_vendor/github.com/docker/buildx/docs/bake-reference.md b/_vendor/github.com/docker/buildx/docs/bake-reference.md index 192dded4fe60..22592ec373ba 100644 --- a/_vendor/github.com/docker/buildx/docs/bake-reference.md +++ b/_vendor/github.com/docker/buildx/docs/bake-reference.md @@ -221,10 +221,14 @@ The following table shows the complete list of attributes that you can assign to | [`attest`](#targetattest) | List | Build attestations | | [`cache-from`](#targetcache-from) | List | External cache sources | | [`cache-to`](#targetcache-to) | List | External cache destinations | +| [`call`](#targetcall) | String | Specify the frontend method to call for the target. 
| | [`context`](#targetcontext) | String | Set of files located in the specified path or URL | | [`contexts`](#targetcontexts) | Map | Additional build contexts | +| [`description`](#targetdescription) | String | Description of a target | | [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string | | [`dockerfile`](#targetdockerfile) | String | Dockerfile location | +| [`entitlements`](#targetentitlements) | List | Permissions that the build process requires to run | +| [`extra-hosts`](#targetextra-hosts) | List | Customs host-to-IP mapping | | [`inherits`](#targetinherits) | List | Inherit attributes from other targets | | [`labels`](#targetlabels) | Map | Metadata for images | | [`matrix`](#targetmatrix) | Map | Define a set of variables that forks a target into multiple targets. | @@ -283,19 +287,11 @@ The key takes a list of annotations, in the format of `KEY=VALUE`. ```hcl target "default" { - output = ["type=image,name=foo"] + output = [{ type = "image", name = "foo" }] annotations = ["org.opencontainers.image.authors=dvdksn"] } ``` -is the same as - -```hcl -target "default" { - output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"] -} -``` - By default, the annotation is added to image manifests. You can configure the level of the annotations by adding a prefix to the annotation, containing a comma-separated list of all the levels that you want to annotate. The following @@ -303,7 +299,12 @@ example adds annotations to both the image index and manifests. ```hcl target "default" { - output = ["type=image,name=foo"] + output = [ + { + type = "image" + name = "foo" + } + ] annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"] } ``` @@ -319,8 +320,13 @@ This attribute accepts the long-form CSV version of attestation parameters. ```hcl target "default" { attest = [ - "type=provenance,mode=min", - "type=sbom" + { + type = "provenance" + mode = "max" + }, + { + type = "sbom" + } ] } ``` @@ -336,8 +342,15 @@ This takes a list value, so you can specify multiple cache sources. ```hcl target "app" { cache-from = [ - "type=s3,region=eu-west-1,bucket=mybucket", - "user/repo:cache", + { + type = "s3" + region = "eu-west-1" + bucket = "mybucket" + }, + { + type = "registry" + ref = "user/repo:cache" + } ] } ``` @@ -353,8 +366,14 @@ This takes a list value, so you can specify multiple cache export targets. ```hcl target "app" { cache-to = [ - "type=s3,region=eu-west-1,bucket=mybucket", - "type=inline" + { + type = "s3" + region = "eu-west-1" + bucket = "mybucket" + }, + { + type = "inline" + } ] } ``` @@ -371,6 +390,13 @@ target "app" { } ``` +Supported values are: + +- `build` builds the target (default) +- `check`: evaluates [build checks](https://docs.docker.com/build/checks/) for the target +- `outline`: displays the target's build arguments and their default values if available +- `targets`: lists all Bake targets in the loaded definition, along with its [description](#targetdescription). + For more information about frontend methods, refer to the CLI reference for [`docker buildx build --call`](https://docs.docker.com/reference/cli/docker/buildx/build/#call). @@ -426,9 +452,9 @@ a context based on the pattern of the context value. 
```hcl # docker-bake.hcl target "app" { - contexts = { - alpine = "docker-image://alpine:3.13" - } + contexts = { + alpine = "docker-image://alpine:3.13" + } } ``` @@ -443,9 +469,9 @@ RUN echo "Hello world" ```hcl # docker-bake.hcl target "app" { - contexts = { - src = "../path/to/source" - } + contexts = { + src = "../path/to/source" + } } ``` @@ -466,12 +492,13 @@ COPY --from=src . . ```hcl # docker-bake.hcl target "base" { - dockerfile = "baseapp.Dockerfile" + dockerfile = "baseapp.Dockerfile" } + target "app" { - contexts = { - baseapp = "target:base" - } + contexts = { + baseapp = "target:base" + } } ``` @@ -481,6 +508,25 @@ FROM baseapp RUN echo "Hello world" ``` +### `target.description` + +Defines a human-readable description for the target, clarifying its purpose or +functionality. + +```hcl +target "lint" { + description = "Runs golangci-lint to detect style errors" + args = { + GOLANGCI_LINT_VERSION = null + } + dockerfile = "lint.Dockerfile" +} +``` + +This attribute is useful when combined with the `docker buildx bake --list=targets` +option, providing a more informative output when listing the available build +targets in a Bake file. + ### `target.dockerfile-inline` Uses the string value as an inline Dockerfile for the build target. @@ -539,6 +585,20 @@ target "integration-tests" { Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Secondly, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming the entitlements when prompted in an interactive terminal. This is to ensure that the user is aware of the possibly insecure permissions they are granting to the build process. +### `target.extra-hosts` + +Use the `extra-hosts` attribute to define customs host-to-IP mapping for the +target. This has the same effect as passing a [`--add-host`][add-host] flag to +the build command. + +```hcl +target "default" { + extra-hosts = { + my_hostname = "8.8.8.8" + } +} +``` + ### `target.inherits` A target can inherit attributes from other targets. @@ -835,7 +895,7 @@ The following example configures the target to use a cache-only output, ```hcl target "default" { - output = ["type=cacheonly"] + output = [{ type = "cacheonly" }] } ``` @@ -875,8 +935,15 @@ variable "HOME" { target "default" { secret = [ - "type=env,id=KUBECONFIG", - "type=file,id=aws,src=${HOME}/.aws/credentials" + { + type = "env" + id = "KUBECONFIG" + }, + { + type = "file" + id = "aws" + src = "${HOME}/.aws/credentials" + } ] } ``` @@ -920,7 +987,7 @@ This can be useful if you need to access private repositories during a build. ```hcl target "default" { - ssh = ["default"] + ssh = [{ id = "default" }] } ``` @@ -1030,6 +1097,7 @@ or interpolate them in attribute values in your Bake file. ```hcl variable "TAG" { + type = string default = "latest" } @@ -1051,6 +1119,206 @@ overriding the default `latest` value shown in the previous example. $ TAG=dev docker buildx bake webapp-dev ``` +Variables can also be assigned an explicit type. +If provided, it will be used to validate the default value (if set), as well as any overrides. +This is particularly useful when using complex types which are intended to be overridden. +The previous example could be expanded to apply an arbitrary series of tags. 
+```hcl
+variable "TAGS" {
+  default = ["latest"]
+  type = list(string)
+}
+
+target "webapp-dev" {
+  dockerfile = "Dockerfile.webapp"
+  tags = [for tag in TAGS: "docker.io/username/webapp:${tag}"]
+}
+```
+
+This example shows how to generate three tags without changing the file
+or using custom functions/parsing:
+```console
+$ TAGS=dev,latest,2 docker buildx bake webapp-dev
+```
+
+### Variable typing
+
+The following primitive types are available:
+* `string`
+* `number`
+* `bool`
+
+The type is expressed like a keyword; it must be expressed as a literal:
+```hcl
+variable "OK" {
+  type = string
+}
+
+# cannot be an actual string
+variable "BAD" {
+  type = "string"
+}
+
+# cannot be the result of an expression
+variable "ALSO_BAD" {
+  type = lower("string")
+}
+```
+Specifying primitive types can be valuable to show intent (especially when a default is not provided),
+but bake will generally behave as expected without explicit typing.
+
+Complex types are expressed with "type constructors"; they are:
+* `tuple([<type>,...])`
+* `list(<type>)`
+* `set(<type>)`
+* `map(<type>)`
+* `object({<attr>=<type>},...)`
+
+The following are examples of each of those, as well as how the (optional) default value would be expressed:
+```hcl
+# structured way to express "1.2.3-alpha"
+variable "MY_VERSION" {
+  type = tuple([number, number, number, string])
+  default = [1, 2, 3, "alpha"]
+}
+
+# JDK versions used in a matrix build
+variable "JDK_VERSIONS" {
+  type = list(number)
+  default = [11, 17, 21]
+}
+
+# better way to express the previous example; this will also
+# enforce set semantics and allow use of set-based functions
+variable "JDK_VERSIONS" {
+  type = set(number)
+  default = [11, 17, 21]
+}
+
+# with the help of lookup(), translate a 'feature' to a tag
+variable "FEATURE_TO_NAME" {
+  type = map(string)
+  default = {featureA = "slim", featureB = "tiny"}
+}
+
+# map a branch name to a registry location
+variable "PUSH_DESTINATION" {
+  type = object({branch = string, registry = string})
+  default = {branch = "main", registry = "prod-registry.invalid.com"}
+}
+
+# make the previous example more useful with composition
+variable "PUSH_DESTINATIONS" {
+  type = list(object({branch = string, registry = string}))
+  default = [
+    {branch = "develop", registry = "test-registry.invalid.com"},
+    {branch = "main", registry = "prod-registry.invalid.com"},
+  ]
+}
+```
+Note that in each example, the default value would be valid even if typing was not present.
+If typing was omitted, the first three would all be considered `tuple`;
+you would be restricted to functions that operate on `tuple` and, for example, not be able to add elements.
+Similarly, the fourth and fifth would both be considered `object`, with the limits and semantics of that type.
+In short, in the absence of a type, any value delimited with `[]` is a `tuple`
+and any value delimited with `{}` is an `object`.
+Explicit typing for complex types not only opens up the ability to use functions applicable to that specialized type,
+but is also a precondition for providing overrides.
+
+> [!NOTE]
+> See the [HCL Type Expressions][typeexpr] page for more details.
+
+### Overriding variables
+
+As mentioned in the [intro to variables](#variable), primitive types (`string`, `number`, and `bool`)
+can be overridden without typing and will generally behave as expected.
+(When explicit typing is not provided, a variable is assumed to be primitive when the default value lacks `{}` or `[]` delimiters;
+a variable with neither typing nor a default value is treated as `string`.)
+Naturally, these same overrides can be used alongside explicit typing too;
+they may help in edge cases where you want `VAR=true` to be a `string`, where without typing,
+it may be a `string` or a `bool` depending on how/where it's used.
+Overriding a variable with a complex type can only be done when the type is provided.
+This is still done via environment variables, but the values can be provided via CSV or JSON.
+
+#### CSV overrides
+
+This is considered the canonical method and is well suited to interactive usage.
+It is assumed that `list` and `set` will be the most common complex type,
+as well as the most common complex type designed to be overridden.
+Thus, there is full CSV support for `list` and `set`
+(and `tuple`; despite being considered a structural type, it is more like a collection type in this regard).
+
+There is limited support for `map` and `object` and no support for composite types;
+for these advanced cases, an alternative mechanism [using JSON](#json-overrides) is available.
+
+#### JSON overrides
+
+Overrides can also be provided via JSON.
+This is the only method available for providing some complex types and may be convenient if overrides are already JSON
+(for example, if they come from a JSON API).
+It can also be used when dealing with values that are difficult or impossible to specify using CSV (e.g., values containing quotes or commas).
+To use JSON, simply append `_JSON` to the variable name.
+In this contrived example, CSV cannot handle the second value; despite being a supported CSV type, JSON must be used:
+```hcl
+variable "VALS" {
+  type = list(string)
+  default = ["some", "list"]
+}
+```
+```console
+$ cat data.json
+["hello","with,comma","with\"quote"]
+$ VALS_JSON=$(< data.json) docker buildx bake
+
+# CSV equivalent, though the second value cannot be expressed at all
+$ VALS='hello,"with""quote"' docker buildx bake
+```
+
+This example illustrates some precedence and usage rules:
+```hcl
+variable "FOO" {
+  type = string
+  default = "foo"
+}
+
+variable "FOO_JSON" {
+  type = string
+  default = "foo"
+}
+```
+
+The variable `FOO` can *only* be overridden using CSV because `FOO_JSON`, which would typically be used for a JSON override,
+is already a defined variable.
+Since `FOO_JSON` is an actual variable, setting that environment variable is expected to provide a CSV value.
+A JSON override *is* possible for this variable, using environment variable `FOO_JSON_JSON`.
+ +```Console +# These three are all equivalent, setting variable FOO=bar +$ FOO=bar docker buildx bake <...> +$ FOO='bar' docker buildx bake <...> +$ FOO="bar" docker buildx bake <...> + +# Sets *only* variable FOO_JSON; FOO is untouched +$ FOO_JSON=bar docker buildx bake <...> + +# This also sets FOO_JSON, but will fail due to not being valid JSON +$ FOO_JSON_JSON=bar docker buildx bake <...> + +# These are all equivalent +$ cat data.json +"bar" +$ FOO_JSON_JSON=$(< data.json) docker buildx bake <...> +$ FOO_JSON_JSON='"bar"' docker buildx bake <...> +$ FOO_JSON=bar docker buildx bake <...> + +# This results in setting two different variables, both specified as CSV (FOO=bar and FOO_JSON="baz") +$ FOO=bar FOO_JSON='"baz"' docker buildx bake <...> + +# These refer to the same variable with FOO_JSON_JSON having precedence and read as JSON (FOO_JSON=baz) +$ FOO_JSON=bar FOO_JSON_JSON='"baz"' docker buildx bake <...> +``` + ### Built-in variables The following variables are built-ins that you can use with Bake without having @@ -1170,6 +1438,7 @@ target "webapp-dev" { +[add-host]: https://docs.docker.com/reference/cli/docker/buildx/build/#add-host [attestations]: https://docs.docker.com/build/attestations/ [bake_stdlib]: https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go [build-arg]: https://docs.docker.com/reference/cli/docker/image/build/#build-arg @@ -1188,4 +1457,5 @@ target "webapp-dev" { [ssh]: https://docs.docker.com/reference/cli/docker/buildx/build/#ssh [tag]: https://docs.docker.com/reference/cli/docker/image/build/#tag [target]: https://docs.docker.com/reference/cli/docker/image/build/#target +[typeexpr]: https://github.com/hashicorp/hcl/tree/main/ext/typeexpr [userfunc]: https://github.com/hashicorp/hcl/tree/main/ext/userfunc diff --git a/_vendor/github.com/docker/cli/docs/deprecated.md b/_vendor/github.com/docker/cli/docs/deprecated.md index 2748dd925492..1a3c353da69f 100644 --- a/_vendor/github.com/docker/cli/docs/deprecated.md +++ b/_vendor/github.com/docker/cli/docs/deprecated.md @@ -53,14 +53,17 @@ The following table provides an overview of the current status of deprecated fea | Status | Feature | Deprecated | Remove | |------------|------------------------------------------------------------------------------------------------------------------------------------|------------|--------| -| Deprecated | [Non-standard fields in image inspect](#non-standard-fields-in-image-inspect) | v27.0 | v28.0 | -| Deprecated | [API CORS headers](#api-cors-headers) | v27.0 | v28.0 | -| Deprecated | [Graphdriver plugins (experimental)](#graphdriver-plugins-experimental) | v27.0 | v28.0 | +| Deprecated | [Empty/nil fields in image Config from inspect API](#emptynil-fields-in-image-config-from-inspect-api) | v28.3 | v29.0 | +| Deprecated | [Configuration for pushing non-distributable artifacts](#configuration-for-pushing-non-distributable-artifacts) | v28.0 | v29.0 | +| Deprecated | [`--time` option on `docker stop` and `docker restart`](#--time-option-on-docker-stop-and-docker-restart) | v28.0 | - | +| Removed | [Non-standard fields in image inspect](#non-standard-fields-in-image-inspect) | v27.0 | v28.2 | +| Removed | [API CORS headers](#api-cors-headers) | v27.0 | v28.0 | +| Removed | [Graphdriver plugins (experimental)](#graphdriver-plugins-experimental) | v27.0 | v28.0 | | Deprecated | [Unauthenticated TCP connections](#unauthenticated-tcp-connections) | v26.0 | v28.0 | -| Deprecated | [`Container` and `ContainerConfig` fields in Image 
inspect](#container-and-containerconfig-fields-in-image-inspect) | v25.0 | v26.0 | -| Deprecated | [Deprecate legacy API versions](#deprecate-legacy-api-versions) | v25.0 | v26.0 | +| Removed | [`Container` and `ContainerConfig` fields in Image inspect](#container-and-containerconfig-fields-in-image-inspect) | v25.0 | v26.0 | +| Removed | [Deprecate legacy API versions](#deprecate-legacy-api-versions) | v25.0 | v26.0 | | Removed | [Container short ID in network Aliases field](#container-short-id-in-network-aliases-field) | v25.0 | v26.0 | -| Deprecated | [IsAutomated field, and `is-automated` filter on `docker search`](#isautomated-field-and-is-automated-filter-on-docker-search) | v25.0 | v26.0 | +| Removed | [IsAutomated field, and `is-automated` filter on `docker search`](#isautomated-field-and-is-automated-filter-on-docker-search) | v25.0 | v28.2 | | Removed | [logentries logging driver](#logentries-logging-driver) | v24.0 | v25.0 | | Removed | [OOM-score adjust for the daemon](#oom-score-adjust-for-the-daemon) | v24.0 | v25.0 | | Removed | [BuildKit build information](#buildkit-build-information) | v23.0 | v24.0 | @@ -69,7 +72,7 @@ The following table provides an overview of the current status of deprecated fea | Removed | [Btrfs storage driver on CentOS 7 and RHEL 7](#btrfs-storage-driver-on-centos-7-and-rhel-7) | v20.10 | v23.0 | | Removed | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | v23.0 | | Removed | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | v23.0 | -| Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | - | +| Removed | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | v28.2 | | Removed | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | v23.0 | | Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options-with-cgroups-v1) | v20.10 | - | | Removed | [Kernel memory limit](#kernel-memory-limit) | v20.10 | v23.0 | @@ -78,9 +81,9 @@ The following table provides an overview of the current status of deprecated fea | Deprecated | [CLI plugins support](#cli-plugins-support) | v20.10 | - | | Deprecated | [Dockerfile legacy `ENV name value` syntax](#dockerfile-legacy-env-name-value-syntax) | v20.10 | - | | Removed | [`docker build --stream` flag (experimental)](#docker-build---stream-flag-experimental) | v20.10 | v20.10 | -| Deprecated | [`fluentd-async-connect` log opt](#fluentd-async-connect-log-opt) | v20.10 | - | +| Removed | [`fluentd-async-connect` log opt](#fluentd-async-connect-log-opt) | v20.10 | v28.0 | | Removed | [Configuration options for experimental CLI features](#configuration-options-for-experimental-cli-features) | v19.03 | v23.0 | -| Deprecated | [Pushing and pulling with image manifest v2 schema 1](#pushing-and-pulling-with-image-manifest-v2-schema-1) | v19.03 | v27.0 | +| Removed | [Pushing and pulling with image manifest v2 schema 1](#pushing-and-pulling-with-image-manifest-v2-schema-1) | v19.03 | v28.2 | | Removed | [`docker engine` subcommands](#docker-engine-subcommands) | v19.03 | v20.10 | | Removed | [Top-level `docker deploy` subcommand (experimental)](#top-level-docker-deploy-subcommand-experimental) | v19.03 | v20.10 | | Removed | [`docker stack deploy` using "dab" files (experimental)](#docker-stack-deploy-using-dab-files-experimental) | v19.03 | v20.10 | @@ 
-118,10 +121,86 @@ The following table provides an overview of the current status of deprecated fea | Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13 | | Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12 | +### Empty/nil fields in image Config from inspect API + +**Deprecated in Release: v28.3** +**Target For Removal In Release: v29.0** + +The `Config` field returned by `docker image inspect` (and the `GET /images/{name}/json` +API endpoint) currently includes certain fields even when they are empty or nil. +Starting in Docker v29.0, the following fields will be omitted from the API response +when they contain empty or default values: + +- `Cmd` +- `Entrypoint` +- `Env` +- `Labels` +- `OnBuild` +- `User` +- `Volumes` +- `WorkingDir` + +Applications consuming the image inspect API should be updated to handle the +absence of these fields gracefully, treating missing fields as having their +default/empty values. + +For API version corresponding to Docker v29.0, these fields will be omitted when +empty. They will continue to be included when using clients that request an older +API version for backward compatibility. + +### Configuration for pushing non-distributable artifacts + +**Deprecated in Release: v28.0** +**Target For Removal In Release: v29.0** + +Non-distributable artifacts (also called foreign layers) were introduced in +docker v1.12 to accommodate Windows images for which the EULA did not allow +layers to be distributed through registries other than those hosted by Microsoft. +The concept of foreign / non-distributable layers was adopted by the OCI distribution +spec in [oci#233]. These restrictions were relaxed later to allow distributing +these images through non-public registries, for which a configuration was added +in Docker v17.0.6.0. + +In 2022, Microsoft updated the EULA and [removed these restrictions][msft-3645201], +followed by the OCI distribution specification deprecating foreign layers in [oci#965]. +In 2023, Microsoft [removed the use of foreign data layers][msft-3846833] for their images, +making this functionality obsolete. + +Docker v28.0 deprecates the `--allow-nondistributable-artifacts` daemon flag and +corresponding `allow-nondistributable-artifacts` field in `daemon.json`. Setting +either option no longer takes an effect, but a deprecation warning log is added +to raise awareness about the deprecation. This warning is planned to become an +error in the Docker v29.0. + +Users currently using these options are therefore recommended to remove this +option from their configuration to prevent the daemon from starting when +upgrading to Docker v29.0. + +The `AllowNondistributableArtifactsCIDRs` and `AllowNondistributableArtifactsHostnames` +fields in the `RegistryConfig` of the `GET /info` API response are also deprecated. +For API version v1.48 and lower, the fields are still included in the response +but always `null`. In API version v1.49 and higher, the field will be omitted +entirely. 
+
+[oci#233]: https://github.com/opencontainers/image-spec/pull/233
+[oci#965]: https://github.com/opencontainers/image-spec/pull/965
+[msft-3645201]: https://techcommunity.microsoft.com/blog/containers/announcing-windows-container-base-image-redistribution-rights-change/3645201
+[msft-3846833]: https://techcommunity.microsoft.com/blog/containers/announcing-removal-of-foreign-layers-from-windows-container-images/3846833
+
+### `--time` option on `docker stop` and `docker restart`
+
+**Deprecated in Release: v28.0**
+
+The `--time` option for the `docker stop`, `docker container stop`, `docker restart`,
+and `docker container restart` commands has been renamed to `--timeout` for
+consistency with other uses of timeout options. The `--time` option is now
+deprecated and hidden, but remains functional for backward compatibility.
+Users are encouraged to migrate to the `--timeout` option instead.
+
 ### Non-standard fields in image inspect
 
 **Deprecated in Release: v27.0**
-**Target For Removal In Release: v28.0**
+**Removed In Release: v28.2**
 
 The `Config` field returned by `docker image inspect` (and as returned by the
 `GET /images/{name}/json` API endpoint) returns additional fields that are
@@ -133,8 +212,9 @@ but are not omitted in the response when left empty. As these fields were not
 intended to be part of the image configuration response, they are deprecated,
 and will be removed from the API in the next release.
 
-The following fields are currently included in the API response, but are not
-part of the underlying image's `Config` field, and deprecated:
+The following fields are not part of the underlying image's `Config` field, and
+are removed from the API response for API v1.50 and newer, corresponding to v28.2.
+They continue to be included when using clients that use an older API version:
 
 - `Hostname`
 - `Domainname`
@@ -145,9 +225,9 @@ part of the underlying image's `Config` field, and deprecated:
 - `AttachStdin`
 - `AttachStdout`
 - `AttachStderr`
 - `Tty`
 - `OpenStdin`
 - `StdinOnce`
 - `Image`
-- `NetworkDisabled` (already omitted unless set)
-- `MacAddress` (already omitted unless set)
-- `StopTimeout` (already omitted unless set)
+- `NetworkDisabled` (omitted unless set on older API versions)
+- `MacAddress` (omitted unless set on older API versions)
+- `StopTimeout` (omitted unless set on older API versions)
 
 [Docker image specification]: https://github.com/moby/docker-image-spec/blob/v1.3.1/specs-go/v1/image.go#L19-L32
 [OCI image specification]: https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/config.go#L24-L62
@@ -159,36 +239,31 @@ part of the underlying image's `Config` field, and deprecated:
 
 **Target For Removal In Release: v28.0**
 
 [Graphdriver plugins](https://github.com/docker/cli/blob/v26.1.4/docs/extend/plugins_graphdriver.md)
-are an experimental feature that allow extending the Docker Engine with custom
+were an experimental feature that allowed extending the Docker Engine with custom
 storage drivers for storing images and containers. This feature was not
-maintained since its inception, and will no longer be supported in upcoming
-releases.
-
-Support for graphdriver plugins is disabled by default in v27.0, and will be
-removed v28.0. An `DOCKERD_DEPRECATED_GRAPHDRIVER_PLUGINS` environment variable
-is provided in v27.0 to re-enable the feature. This environment variable must
-be set to a non-empty value in the daemon's environment.
+maintained since its inception.
 
-The `DOCKERD_DEPRECATED_GRAPHDRIVER_PLUGINS` environment variable, along with
-support for graphdriver plugins, will be removed in v28.0. Users of this feature
-are recommended to instead configure the Docker Engine to use the [containerd image store](https://docs.docker.com/storage/containerd/)
+Support for graphdriver plugins was disabled by default in v27.0, and removed
+in v28.0. Users of this feature are recommended to instead configure the Docker
+Engine to use the [containerd image store](https://docs.docker.com/storage/containerd/)
 and a custom [snapshotter](https://github.com/containerd/containerd/tree/v1.7.18/docs/snapshotters).
 
 ### API CORS headers
 
 **Deprecated in Release: v27.0**
-**Target For Removal In Release: v28.0**
+**Disabled by default in Release: v27.0**
+**Removed in release: v28.0**
 
 The `api-cors-header` configuration option for the Docker daemon is insecure,
 and has therefore been deprecated and removed. Incorrectly setting this
 option could leave a window of opportunity for unauthenticated cross-origin
 requests to be accepted by the daemon.
 
-Starting in Docker Engine v27.0, this flag can still be set,
+In Docker Engine v27.0, this flag can still be set,
 but it has no effect unless the environment variable
 `DOCKERD_DEPRECATED_CORS_HEADER` is also set to a non-empty value.
-This flag will be removed altogether in v28.0.
+This flag has been removed altogether in v28.0.
 
 This is a breaking change for authorization plugins and other programs that
 depend on this option for accessing the Docker API from a browser.
@@ -224,15 +299,15 @@ configuring TLS (or SSH) for the Docker daemon, refer to
 
 ### `Container` and `ContainerConfig` fields in Image inspect
 
 **Deprecated in Release: v25.0**
-**Target For Removal In Release: v26.0**
+**Removed In Release: v26.0**
 
 The `Container` and `ContainerConfig` fields returned by `docker inspect` are
 mostly an implementation detail of the classic (non-BuildKit) image builder.
 These fields are not portable and are empty when using the BuildKit-based
 builder (enabled by default since v23.0).
 
-These fields are deprecated in v25.0 and will be omitted starting from v26.0.
-If image configuration of an image is needed, you can obtain it from the
-`Config` field.
+These fields are deprecated in v25.0 and are omitted starting from v26.0
+(API version v1.45 and up). If the configuration of an image is needed,
+you can obtain it from the `Config` field.
 
 ### Deprecate legacy API versions
 
@@ -274,20 +349,22 @@ Error response from daemon: client version 1.23 is too old. Minimum supported AP
 upgrade your client to a newer version
 ```
 
+Support for API versions lower than `1.24` has been permanently removed in Docker
+Engine v26, and the minimum supported API version will be incrementally raised
+in subsequent releases.
+
+
+
 ### Container short ID in network Aliases field
 
@@ -307,7 +384,7 @@ introduced in v25.0 and should be used instead of the `Aliases` field.
 
 ### IsAutomated field, and `is-automated` filter on `docker search`
 
 **Deprecated in Release: v25.0**
-**Target For Removal In Release: v26.0**
+**Removed In Release: v28.2**
 
 The `is_automated` field has been deprecated by Docker Hub's search API.
 Consequently, the `IsAutomated` field in image search will always be set
@@ -316,7 +393,7 @@ results.
 
 The `AUTOMATED` column has been removed from the default `docker search`
 and `docker image search` output in v25.0, and the corresponding `IsAutomated`
-templating option will be removed in v26.0.
+templating option has been removed in v28.2.
 
 ### Logentries logging driver
 
@@ -498,6 +575,7 @@ CLI configuration file are no longer used, and ignored.
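+
+For illustration only, this is the kind of legacy entry in `~/.docker/config.json`
+that is now ignored; a sketch, assuming the pre-v23.0 configuration format:
+
+```json
+{
+  "experimental": "enabled"
+}
+```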
 ### Pulling images from non-compliant image registries
 
 **Deprecated in Release: v20.10**
+**Removed in Release: v28.2**
 
 Docker Engine v20.10 and up includes optimizations to verify if images in the
 local image cache need updating before pulling, preventing the Docker Engine
@@ -507,7 +585,7 @@ image registry to conform to the [Open Container Initiative Distribution Specifi
 
 While most registries conform to the specification, we found some registries
 to be non-compliant, causing `docker pull` to fail.
 
-As a temporary solution, Docker Engine v20.10 includes a fallback mechanism to
+As a temporary solution, Docker Engine v20.10 added a fallback mechanism to
 allow `docker pull` to be functional when using a non-compliant registry. A
 warning message is printed in this situation:
 
@@ -516,16 +594,13 @@ warning message is printed in this situation:
     pull by tag. This fallback is DEPRECATED, and will be removed in a future
     release.
 
-The fallback is added to allow users to either migrate their images to a compliant
-registry, or for these registries to become compliant.
-
-Note that this fallback only addresses failures on `docker pull`. Other commands,
-such as `docker stack deploy`, or pulling images with `containerd` will continue
-to fail.
+The fallback was added to allow users to either migrate their images to a
+compliant registry, or for these registries to become compliant.
 
-Given that other functionality is still broken with these registries, we consider
-this fallback a _temporary_ solution, and will remove the fallback in an upcoming
-major release.
+GitHub deprecated the legacy `docker.pkg.github.com` registry, and it was
+[sunset on Feb 24th, 2025](https://github.blog/changelog/2025-01-23-legacy-docker-registry-closing-down/)
+in favor of GitHub Container Registry (GHCR, ghcr.io), making this fallback
+no longer needed.
 
 ### Linux containers on Windows (LCOW) (experimental)
 
@@ -666,6 +741,7 @@ files.
 
 ### `fluentd-async-connect` log opt
 
 **Deprecated in Release: v20.10**
+**Removed in Release: v28.0**
 
 The `--log-opt fluentd-async-connect` option for the fluentd logging driver is
 [deprecated in favor of `--log-opt fluentd-async`](https://github.com/moby/moby/pull/39086).
@@ -676,7 +752,7 @@ fluent#New: AsyncConnect is now deprecated, use Async instead
 ```
 
 Users are encouraged to use the `fluentd-async` option going forward, as support
-for the old option will be removed in a future release.
+for the old option has been removed.
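+
+For example, a container that previously used `fluentd-async-connect` would now
+enable asynchronous connection handling with the `fluentd-async` option. A
+minimal sketch; the fluentd address is a placeholder for illustration:
+
+```console
+$ docker run --log-driver fluentd \
+    --log-opt fluentd-async=true \
+    --log-opt fluentd-address=localhost:24224 \
+    busybox echo hello
+```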
 
 ### Pushing and pulling with image manifest v2 schema 1
 
@@ -684,7 +760,8 @@ for the old option will be removed in a future release.
 
 **Disabled by default in Release: v26.0**
 
-**Target For Removal In Release: v27.0**
+**Removed in Release: v28.2**
+
 
 The image manifest [v2 schema 1](https://distribution.github.io/distribution/spec/deprecated-schema-v1/)
 and "Docker Image v1" formats were deprecated in favor of the
@@ -695,23 +772,17 @@ formats. These legacy formats should no longer be used, and users are
 recommended to update images to use current formats, or to upgrade to more
 current images.
 
 Starting with Docker v26.0, pulling these images is disabled by default, and
-produces an error when attempting to pull the image:
+support has been removed in v28.2. Attempting to pull a legacy image now
+produces an error:
 
 ```console
 $ docker pull ubuntu:10.04
 Error response from daemon:
-[DEPRECATION NOTICE] Docker Image Format v1 and Docker Image manifest version 2, schema 1 support is disabled by default and will be removed in an upcoming release.
+Docker Image Format v1 and Docker Image manifest version 2, schema 1 support has been removed.
 Suggest the author of docker.io/library/ubuntu:10.04 to upgrade the image to the OCI Format or Docker Image manifest v2, schema 2.
 More information at https://docs.docker.com/go/deprecated-image-specs/
 ```
 
-An environment variable (`DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE`) is
-added in Docker v26.0 that allows re-enabling support for these image formats
-in the daemon. This environment variable must be set to a non-empty value in
-the daemon's environment (for example, through a [systemd override file](https://docs.docker.com/config/daemon/systemd/)).
-Support for the `DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE` environment variable
-will be removed in Docker v27.0 after which this functionality is removed permanently.
-
 ### `docker engine` subcommands
 
 **Deprecated in Release: v19.03**
 
diff --git a/_vendor/github.com/docker/cli/docs/extend/_index.md b/_vendor/github.com/docker/cli/docs/extend/_index.md
index 57528444d1ee..8c85b104852f 100644
--- a/_vendor/github.com/docker/cli/docs/extend/_index.md
+++ b/_vendor/github.com/docker/cli/docs/extend/_index.md
@@ -35,31 +35,38 @@ Plugins that start successfully are listed as enabled in the output.
 
 After a plugin is installed, you can use it as an option for another Docker
 operation, such as creating a volume.
 
-In the following example, you install the `sshfs` plugin, verify that it is
+In the following example, you install the [`rclone` plugin](https://rclone.org/docker/), verify that it is
 enabled, and use it to create a volume.
 
 > [!NOTE]
-> This example is intended for instructional purposes only. Once the volume is
-> created, your SSH password to the remote host is exposed as plaintext when
-> inspecting the volume. Delete the volume as soon as you are done with the
-> example.
+> This example is intended for instructional purposes only.
 
-1. Install the `sshfs` plugin.
+1. Set up the prerequisite directories. By default, they must exist on the host at the following locations:
 
-   ```console
-   $ docker plugin install vieux/sshfs
+   - `/var/lib/docker-plugins/rclone/config`. Reserved for the `rclone.conf` config file and must exist even if it's empty and the config file is not present.
+   - `/var/lib/docker-plugins/rclone/cache`. Holds the plugin state file as well as optional VFS caches.
 
-   Plugin "vieux/sshfs" is requesting the following privileges:
-   - network: [host]
-   - capabilities: [CAP_SYS_ADMIN]
-   Do you grant the above permissions? [y/N] y
+2. Install the `rclone` plugin.
 
-   vieux/sshfs
+   ```console
+   $ docker plugin install rclone/docker-volume-rclone --alias rclone
+
+   Plugin "rclone/docker-volume-rclone" is requesting the following privileges:
+   - network: [host]
+   - mount: [/var/lib/docker-plugins/rclone/config]
+   - mount: [/var/lib/docker-plugins/rclone/cache]
+   - device: [/dev/fuse]
+   - capabilities: [CAP_SYS_ADMIN]
+   Do you grant the above permissions? [y/N]
   ```
 
-   The plugin requests 2 privileges:
+   The plugin requests five privileges:
 
   - It needs access to the `host` network.
+   - It needs access to the prerequisite directories to store:
+     - Your Rclone config files
+     - Temporary cache data
+   - It needs access to the FUSE (Filesystem in Userspace) device. This is required because Rclone uses FUSE to mount remote storage as if it were a local filesystem.
   - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run
    the `mount` command.
 
@@ -68,24 +75,25 @@ enabled, and use it to create a volume.
    ```console
    $ docker plugin ls
 
-   ID                  NAME                TAG       DESCRIPTION                       ENABLED
-   69553ca1d789        vieux/sshfs         latest    the `sshfs` plugin                true
+   ID                  NAME                DESCRIPTION                       ENABLED
+   aede66158353        rclone:latest       Rclone volume plugin for Docker   true
    ```
 
 3. Create a volume using the plugin.
    This example mounts the `/remote` directory on host `1.2.3.4` into a
-   volume named `sshvolume`.
+   volume named `rclonevolume`. This volume can now be mounted into containers.
 
    ```console
    $ docker volume create \
-     -d vieux/sshfs \
-     --name sshvolume \
-     -o sshcmd=user@1.2.3.4:/remote \
-     -o password=$(cat file_containing_password_for_remote_host)
-
-   sshvolume
+     -d rclone \
+     --name rclonevolume \
+     -o type=sftp \
+     -o path=remote \
+     -o sftp-host=1.2.3.4 \
+     -o sftp-user=user \
+     -o "sftp-password=$(cat file_containing_password_for_remote_host)"
    ```
 
 4. Verify that the volume was created successfully.
 
@@ -94,21 +102,21 @@ enabled, and use it to create a volume.
    $ docker volume ls
 
    DRIVER              NAME
-   vieux/sshfs         sshvolume
+   rclone              rclonevolume
    ```
 
-5. Start a container that uses the volume `sshvolume`.
+5. Start a container that uses the volume `rclonevolume`.
 
    ```console
-   $ docker run --rm -v sshvolume:/data busybox ls /data
+   $ docker run --rm -v rclonevolume:/data busybox ls /data
   ```
 
-6. Remove the volume `sshvolume`
+6. Remove the volume `rclonevolume`.
 
    ```console
-   $ docker volume rm sshvolume
+   $ docker volume rm rclonevolume
   ```
 
diff --git a/_vendor/github.com/docker/cli/docs/reference/dockerd.md b/_vendor/github.com/docker/cli/docs/reference/dockerd.md
index 523eb82f13df..1e2b01633c5a 100644
--- a/_vendor/github.com/docker/cli/docs/reference/dockerd.md
+++ b/_vendor/github.com/docker/cli/docs/reference/dockerd.md
@@ -18,16 +18,16 @@ aliases:
 
 # daemon
 
 ```markdown
-Usage: dockerd [OPTIONS]
+Usage: dockerd [OPTIONS]
 
 A self-sufficient runtime for containers.
 Options:
       --add-runtime runtime                    Register an additional OCI compatible runtime (default [])
-      --allow-nondistributable-artifacts list  Allow push of nondistributable artifacts to registry
-      --api-cors-header string                 Set CORS headers in the Engine API
+      --allow-direct-routing                   Allow remote access to published ports on container IP addresses
       --authorization-plugin list              Authorization plugins to load
-      --bip string                             Specify network bridge IP
+      --bip string                             IPv4 address for the default bridge
+      --bip6 string                            IPv6 address for the default bridge
  -b,  --bridge string                          Attach containers to a network bridge
       --cdi-spec-dir list                      CDI specification directories to use
       --cgroup-parent string                   Set parent cgroup for all containers
@@ -44,8 +44,8 @@ Options:
  -D,  --debug                                  Enable debug mode
       --default-address-pool pool-options      Default address pools for node specific local networks
      --default-cgroupns-mode string           Default mode for containers cgroup namespace ("host" | "private") (default "private")
-      --default-gateway ip                     Container default gateway IPv4 address
-      --default-gateway-v6 ip                  Container default gateway IPv6 address
+      --default-gateway ip                     Default gateway IPv4 address for the default bridge network
+      --default-gateway-v6 ip                  Default gateway IPv6 address for the default bridge network
       --default-ipc-mode string                Default mode for containers ipc ("shareable" | "private") (default "private")
       --default-network-opt mapmap             Default network options (default map[])
       --default-runtime string                 Default OCI runtime for containers (default "runc")
@@ -58,25 +58,26 @@ Options:
       --exec-root string                       Root directory for execution state files (default "/var/run/docker")
       --experimental                           Enable experimental features
       --feature map                            Enable feature in the daemon
-      --fixed-cidr string                      IPv4 subnet for fixed IPs
-      --fixed-cidr-v6 string                   IPv6 subnet for fixed IPs
+      --fixed-cidr string                      IPv4 subnet for the default bridge network
+      --fixed-cidr-v6 string                   IPv6 subnet for the default bridge network
  -G,  --group string                           Group for the unix socket (default "docker")
       --help                                   Print usage
  -H,  --host list                              Daemon socket(s) to connect to
-      --host-gateway-ip ip                     IP address that the special 'host-gateway' string in --add-host resolves to.
-                                               Defaults to the IP address of the default bridge
+      --host-gateway-ip list                   IP addresses that the special 'host-gateway' string in --add-host resolves to.
+                                               Defaults to the IP addresses of the default bridge
       --http-proxy string                      HTTP proxy URL to use for outgoing traffic
       --https-proxy string                     HTTPS proxy URL to use for outgoing traffic
-      --icc                                    Enable inter-container communication (default true)
+      --icc                                    Enable inter-container communication for the default bridge network (default true)
       --init                                   Run an init in the container to forward signals and reap processes
       --init-path string                       Path to the docker-init binary
       --insecure-registry list                 Enable insecure registry communication
-      --ip ip                                  Default IP when binding container ports (default 0.0.0.0)
-      --ip-forward                             Enable net.ipv4.ip_forward (default true)
-      --ip-masq                                Enable IP masquerading (default true)
-      --ip6tables                              Enable addition of ip6tables rules (experimental)
+      --ip ip                                  Host IP for port publishing from the default bridge network (default 0.0.0.0)
+      --ip-forward                             Enable IP forwarding in system configuration (default true)
+      --ip-forward-no-drop                     Do not set the filter-FORWARD policy to DROP when enabling IP forwarding
+      --ip-masq                                Enable IP masquerading for the default bridge network (default true)
+      --ip6tables                              Enable addition of ip6tables rules (default true)
       --iptables                               Enable addition of iptables rules (default true)
-      --ipv6                                   Enable IPv6 networking
+      --ipv6                                   Enable IPv6 networking for the default bridge network
       --label list                             Set key=value labels to the daemon
       --live-restore                           Enable live restore of docker when containers are still running
       --log-driver string                      Default driver for container logs (default "json-file")
@@ -87,7 +88,7 @@ Options:
       --max-concurrent-uploads int             Set the max concurrent uploads (default 5)
       --max-download-attempts int              Set the max download attempts for each pull (default 5)
       --metrics-addr string                    Set default address and port to serve the metrics api on
-      --mtu int                                Set the containers network MTU (default 1500)
+      --mtu int                                Set the MTU for the default "bridge" network (default 1500)
       --network-control-plane-mtu int          Network Control plane MTU (default 1500)
       --no-new-privileges                      Set no-new-privileges by default for new containers
       --no-proxy string                        Comma-separated list of hosts or IP addresses for which the proxy is skipped
@@ -96,7 +97,7 @@ Options:
       --raw-logs                               Full timestamps without ANSI coloring
       --registry-mirror list                   Preferred registry mirror
       --rootless                               Enable rootless mode; typically used with RootlessKit
-      --seccomp-profile string                 Path to seccomp profile. Use "unconfined" to disable the default seccomp profile (default "builtin")
+      --seccomp-profile string                 Path to seccomp profile. Set to "unconfined" to disable the default seccomp profile (default "builtin")
       --selinux-enabled                        Enable selinux support
       --shutdown-timeout int                   Set the default shutdown timeout (default 15)
  -s,  --storage-driver string                  Storage driver to use
@@ -687,34 +688,6 @@ To set the DNS search domain for all Docker containers, use:
 
 $ sudo dockerd --dns-search example.com
 ```
 
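+The equivalent setting in `daemon.json` (a sketch, assuming the default
+configuration file location) is:
+
+```json
+{
+  "dns-search": ["example.com"]
+}
+```
+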
-### Allow push of non-distributable artifacts
-
-Some images (e.g., Windows base images) contain artifacts whose distribution is
-restricted by license. When these images are pushed to a registry, restricted
-artifacts are not included.
-
-To override this behavior for specific registries, use the
-`--allow-nondistributable-artifacts` option in one of the following forms:
-
-* `--allow-nondistributable-artifacts myregistry:5000` tells the Docker daemon
-  to push non-distributable artifacts to myregistry:5000.
-* `--allow-nondistributable-artifacts 10.1.0.0/16` tells the Docker daemon to
-  push non-distributable artifacts to all registries whose resolved IP address
-  is within the subnet described by the CIDR syntax.
-
-This option can be used multiple times.
-
-This option is useful when pushing images containing non-distributable artifacts
-to a registry on an air-gapped network so hosts on that network can pull the
-images without connecting to another server.
-
-> [!WARNING]
-> Non-distributable artifacts typically have restrictions on how
-> and where they can be distributed and shared. Only use this feature to push
-> artifacts to private registries and ensure that you are in compliance with
-> any terms that cover redistributing non-distributable artifacts.
-{ .warning }
-
 ### Insecure registries
 
 In this section, "registry" refers to a private registry, and `myregistry:5000`
@@ -837,59 +810,79 @@ For details about how to use this feature, as well as limitations, see
 
 The Docker daemon supports a special `host-gateway` value for the `--add-host`
 flag for the `docker run` and `docker build` commands. This value resolves to
-the host's gateway IP and lets containers connect to services running on the
+addresses on the host, so that containers can connect to services running on the
 host.
 
-By default, `host-gateway` resolves to the IP address of the default bridge.
+By default, `host-gateway` resolves to the IPv4 address of the default bridge,
+and its IPv6 address if it has one.
+
 You can configure this to resolve to a different IP using the `--host-gateway-ip`
 flag for the dockerd command line interface, or the `host-gateway-ip` key in
 the daemon configuration file.
 
+To supply both IPv4 and IPv6 addresses on the command line, use two
+`--host-gateway-ip` options.
+
+To supply addresses in the daemon configuration file, use `"host-gateway-ips"`
+with a JSON array, as shown below. For compatibility with older versions of the
+daemon, a single IP address can also be specified as a JSON string in option
+`"host-gateway-ip"`.
+
 ```console
 $ cat > /etc/docker/daemon.json
-{ "host-gateway-ip": "192.0.2.0" }
+{ "host-gateway-ips": ["192.0.2.1", "2001:db8::1111"] }
 $ sudo systemctl restart docker
 $ docker run -it --add-host host.docker.internal:host-gateway \
   busybox ping host.docker.internal
-PING host.docker.internal (192.0.2.0): 56 data bytes
+PING host.docker.internal (192.0.2.1): 56 data bytes
+$ docker run -it --add-host host.docker.internal:host-gateway \
+  busybox ping -6 host.docker.internal
+PING host.docker.internal (2001:db8::1111): 56 data bytes
 ```
 
-### Enable CDI devices
-
-> [!NOTE]
-> This is experimental feature and as such doesn't represent a stable API.
->
-> This feature isn't enabled by default. To this feature, set `features.cdi` to
-> `true` in the `daemon.json` configuration file.
+### Configure CDI devices
 
 Container Device Interface (CDI) is a [standardized](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md)
 mechanism for container runtimes to create containers which are able to interact
 with third party devices.
 
+CDI is currently only supported for Linux containers and is enabled by default
+since Docker Engine 28.3.0.
+
 The Docker daemon supports running containers with CDI devices if the requested
 device specifications are available on the filesystem of the daemon.
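+
+For illustration, a CDI specification is a JSON (or YAML) file placed in one of
+the specification directories described below. The following is a minimal
+sketch; the vendor name, device name, and device node are hypothetical
+placeholders, not a real specification:
+
+```json
+{
+  "cdiVersion": "0.5.0",
+  "kind": "vendor.example.com/device",
+  "devices": [
+    {
+      "name": "mydevice",
+      "containerEdits": {
+        "deviceNodes": [{ "path": "/dev/vendordevice0" }]
+      }
+    }
+  ]
+}
+```
+
+A container can then request the device by its fully-qualified name:
+
+```console
+$ docker run --rm --device vendor.example.com/device=mydevice busybox true
+```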
-The default specification directors are:
+The default specification directories are:
 
 - `/etc/cdi/` for static CDI Specs
 - `/var/run/cdi` for generated CDI Specs
 
-Alternatively, you can set custom locations for CDI specifications using the
+#### Set custom locations
+
+To set custom locations for CDI specifications, use the
 `cdi-spec-dirs` option in the `daemon.json` configuration file, or the
-`--cdi-spec-dir` flag for the `dockerd` CLI.
+`--cdi-spec-dir` flag for the `dockerd` CLI:
 
 ```json
 {
-  "features": {
-    "cdi": true
-  },
   "cdi-spec-dirs": ["/etc/cdi/", "/var/run/cdi"]
 }
 ```
 
-When CDI is enabled for a daemon, you can view the configured CDI specification
-directories using the `docker info` command.
+You can view the configured CDI specification directories using the `docker info` command.
+
+#### Disable CDI devices
+
+The feature is enabled by default. To disable it, use the `cdi` option in the `daemon.json` file:
+
+```json
+{
+  "features": {
+    "cdi": false
+  }
+}
+```
+
+To check the status of CDI devices, run `docker info`.
 
 #### Daemon logging format {#log-format}
 
@@ -1072,10 +1065,10 @@ The following is a full example of the allowed configuration options on Linux:
 
 ```json
 {
-  "allow-nondistributable-artifacts": [],
-  "api-cors-header": "",
+  "allow-direct-routing": false,
   "authorization-plugins": [],
   "bip": "",
+  "bip6": "",
   "bridge": "",
   "builder": {
     "gc": {
@@ -1220,7 +1213,6 @@ The following is a full example of the allowed configuration options on Windows:
 
 ```json
 {
-  "allow-nondistributable-artifacts": [],
   "authorization-plugins": [],
   "bridge": "",
   "containerd": "\\\\.\\pipe\\containerd-containerd",
@@ -1317,14 +1309,13 @@ The list of currently supported options that can be reconfigured is this:
 | ---------------------------------- | ----------------------------------------------------------------------------------------------------------- |
 | `debug`                            | Toggles debug mode of the daemon.                                                                             |
 | `labels`                           | Replaces the daemon labels with a new set of labels.                                                          |
-| `live-restore`                     | Toggles [live restore](https://docs.docker.com/engine/containers/live-restore/).                              |
+| `live-restore`                     | Toggles [live restore](https://docs.docker.com/engine/daemon/live-restore/).                                  |
 | `max-concurrent-downloads`         | Configures the max concurrent downloads for each pull.                                                        |
 | `max-concurrent-uploads`           | Configures the max concurrent uploads for each push.                                                          |
 | `max-download-attempts`            | Configures the max download attempts for each pull.                                                           |
 | `default-runtime`                  | Configures the runtime to be used if none is specified at container creation.                                 |
 | `runtimes`                         | Configures the list of available OCI runtimes that can be used to run containers.                             |
 | `authorization-plugin`             | Specifies the authorization plugins to use.                                                                   |
-| `allow-nondistributable-artifacts` | Specifies a list of registries to which the daemon will push non-distributable artifacts.                     |
 | `insecure-registries`              | Specifies a list of registries that the daemon should consider insecure.                                      |
 | `registry-mirrors`                 | Specifies a list of registry mirrors.                                                                         |
 | `shutdown-timeout`                 | Configures the daemon's existing configuration timeout with a new timeout for shutting down all containers.  |
diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose.md
index d625c253e675..74d129d832f9 100644
--- a/_vendor/github.com/docker/compose/v2/docs/reference/compose.md
+++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose.md
@@ -12,6 +12,7 @@ Define and run multi-container applications with Docker
 | Name                            | Description                                                                              |
 |:--------------------------------|:------------------------------------------------------------------------------------------|
 | [`attach`](compose_attach.md)   | Attach local standard input, output, and error streams to a service's running container |
+| [`bridge`](compose_bridge.md)   | Convert compose files into another model                                                 |
 | [`build`](compose_build.md)     | Build or rebuild services                                                                |
 | [`commit`](compose_commit.md)   | Create a new image from a service container's changes                                    |
 | [`config`](compose_config.md)   | Parse, resolve and render compose file in canonical format                               |
@@ -28,6 +29,7 @@ Define and run multi-container applications with Docker
 | [`pause`](compose_pause.md)     | Pause services                                                                           |
 | [`port`](compose_port.md)       | Print the public port for a port binding                                                 |
 | [`ps`](compose_ps.md)           | List containers                                                                          |
+| [`publish`](compose_publish.md) | Publish compose application                                                              |
 | [`pull`](compose_pull.md)       | Pull service images                                                                      |
 | [`push`](compose_push.md)       | Push service images                                                                      |
 | [`restart`](compose_restart.md) | Restart service containers                                                               |
@@ -41,6 +43,7 @@ Define and run multi-container applications with Docker
 | [`unpause`](compose_unpause.md) | Unpause services                                                                         |
 | [`up`](compose_up.md)           | Create and start containers                                                              |
 | [`version`](compose_version.md) | Show the Docker Compose version information                                              |
+| [`volumes`](compose_volumes.md) | List volumes                                                                             |
 | [`wait`](compose_wait.md)       | Block until containers of all (or specified) services stop.                              |
 | [`watch`](compose_watch.md)     | Watch build context for service and rebuild/refresh containers when files are updated    |
@@ -57,7 +60,7 @@ Define and run multi-container applications with Docker
 | `-f`, `--file`         | `stringArray` |         | Compose configuration files                                 |
 | `--parallel`           | `int`         | `-1`    | Control max parallelism, -1 for unlimited                   |
 | `--profile`            | `stringArray` |         | Specify a profile to enable                                 |
-| `--progress`           | `string`      | `auto`  | Set type of progress output (auto, tty, plain, json, quiet) |
+| `--progress`           | `string`      |         | Set type of progress output (auto, tty, plain, json, quiet) |
 | `--project-directory`  | `string`      |         | Specify an alternate working directory<br>(default: the path of the first specified Compose file) |
 | `-p`, `--project-name` | `string`      |         | Project name                                                |
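+
+For example, to run a command with plain, non-interactive progress output, a
+sketch using the global `--progress` option from the table above:
+
+```console
+$ docker compose --progress plain up -d
+```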
 
@@ -67,7 +70,7 @@ Define and run multi-container applications with Docker
 
 ## Examples
 
 ### Use `-f` to specify the name and path of one or more Compose files
 
-Use the `-f` flag to specify the location of a Compose configuration file.
+Use the `-f` flag to specify the location of a Compose [configuration file](/reference/compose-file/).
 
 #### Specifying multiple Compose files
 
 You can supply multiple `-f` configuration files. When you supply multiple files, Compose combines them into a single
@@ -77,10 +80,10 @@ to their predecessors.
 
 For example, consider this command line:
 
 ```console
-$ docker compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
+$ docker compose -f compose.yaml -f compose.admin.yaml run backup_db
 ```
 
-The `docker-compose.yml` file might specify a `webapp` service.
+The `compose.yaml` file might specify a `webapp` service.
 
 ```yaml
 services:
   webapp:
     image: examples/web
     ports:
       - "8000:8000"
     volumes:
       - "/data"
 ```
 
-If the `docker-compose.admin.yml` also specifies this same service, any matching fields override the previous file.
+If the `compose.admin.yaml` also specifies this same service, any matching fields override the previous file.
 New values add to the `webapp` service configuration.
 
 ```yaml
@@ -206,4 +209,4 @@ $ docker compose --dry-run up --build -d
 
 From the example above, you can see that the first step is to pull the image defined by the `db` service, then build the `backend` service.
 Next, the containers are created. The `db` service is started, and the `backend` and `proxy` wait until the `db` service is healthy before starting.
 
-Dry Run mode works with almost all commands. You cannot use Dry Run mode with a command that doesn't change the state of a Compose stack such as `ps`, `ls`, `logs` for example.
+Dry Run mode works with almost all commands. You cannot use Dry Run mode with a command that doesn't change the state of a Compose stack, such as `ps`, `ls`, or `logs`.
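+
+To verify how multiple Compose files merge, you can render the combined model
+with `docker compose config`. A minimal sketch, assuming the two example files
+shown above:
+
+```console
+$ docker compose -f compose.yaml -f compose.admin.yaml config
+```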
diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md index 7fe79480ba9e..6e77d714532b 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_alpha_publish.md @@ -8,8 +8,10 @@ Publish compose application | Name | Type | Default | Description | |:--------------------------|:---------|:--------|:-------------------------------------------------------------------------------| | `--dry-run` | `bool` | | Execute command in dry run mode | -| `--oci-version` | `string` | | OCI Image/Artifact specification version (automatically determined by default) | +| `--oci-version` | `string` | | OCI image/artifact specification version (automatically determined by default) | | `--resolve-image-digests` | `bool` | | Pin image tags to digests | +| `--with-env` | `bool` | | Include environment variables in the published OCI artifact | +| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge.md new file mode 100644 index 000000000000..78d3da4934c5 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge.md @@ -0,0 +1,22 @@ +# docker compose bridge + + +Convert compose files into another model + +### Subcommands + +| Name | Description | +|:-------------------------------------------------------|:-----------------------------------------------------------------------------| +| [`convert`](compose_bridge_convert.md) | Convert compose files to Kubernetes manifests, Helm charts, or another model | +| [`transformations`](compose_bridge_transformations.md) | Manage transformation images | + + +### Options + +| Name | Type | Default | Description | +|:------------|:-------|:--------|:--------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_convert.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_convert.md new file mode 100644 index 000000000000..d4b91ba172d2 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_convert.md @@ -0,0 +1,17 @@ +# docker compose bridge convert + + +Convert compose files to Kubernetes manifests, Helm charts, or another model + +### Options + +| Name | Type | Default | Description | +|:-------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `-o`, `--output` | `string` | `out` | The output directory for the Kubernetes resources | +| `--templates` | `string` | | Directory containing transformation templates | +| `-t`, `--transformation` | `stringArray` | | Transformation to apply to compose model (default: docker/compose-bridge-kubernetes) | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations.md new file mode 100644 index 000000000000..1e1c7be392b1 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations.md @@ -0,0 +1,22 @@ +# docker compose bridge transformations + + +Manage 
transformation images + +### Subcommands + +| Name | Description | +|:-----------------------------------------------------|:-------------------------------| +| [`create`](compose_bridge_transformations_create.md) | Create a new transformation | +| [`list`](compose_bridge_transformations_list.md) | List available transformations | + + +### Options + +| Name | Type | Default | Description | +|:------------|:-------|:--------|:--------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_create.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_create.md new file mode 100644 index 000000000000..187e8d9eca30 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_create.md @@ -0,0 +1,15 @@ +# docker compose bridge transformations create + + +Create a new transformation + +### Options + +| Name | Type | Default | Description | +|:---------------|:---------|:--------|:----------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `-f`, `--from` | `string` | | Existing transformation to copy (default: docker/compose-bridge-kubernetes) | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_list.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_list.md new file mode 100644 index 000000000000..ce0a5e6911ad --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_bridge_transformations_list.md @@ -0,0 +1,20 @@ +# docker compose bridge transformations list + + +List available transformations + +### Aliases + +`docker compose bridge transformations list`, `docker compose bridge transformations ls` + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:-------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--format` | `string` | `table` | Format the output. Values: [table \| json] | +| `-q`, `--quiet` | `bool` | | Only display transformer names | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md index eb05121a7dce..5589a46934c6 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_build.md @@ -4,9 +4,9 @@ Services are built once and then tagged, by default as `project-service`. If the Compose file specifies an -[image](https://github.com/compose-spec/compose-spec/blob/master/spec.md#image) name, +[image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name, the image is tagged with that name, substituting any variables beforehand. See -[variable interpolation](https://github.com/compose-spec/compose-spec/blob/master/spec.md#interpolation). +[variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation). If you change a service's `Dockerfile` or the contents of its build directory, run `docker compose build` to rebuild it. @@ -17,9 +17,11 @@ run `docker compose build` to rebuild it. 
|:----------------------|:--------------|:--------|:------------------------------------------------------------------------------------------------------------| | `--build-arg` | `stringArray` | | Set build-time variables for services | | `--builder` | `string` | | Set builder to use | +| `--check` | `bool` | | Check build configuration | | `--dry-run` | `bool` | | Execute command in dry run mode | | `-m`, `--memory` | `bytes` | `0` | Set memory limit for the build container. Not supported by BuildKit. | | `--no-cache` | `bool` | | Do not use cache when building the image | +| `--print` | `bool` | | Print equivalent bake file | | `--pull` | `bool` | | Always attempt to pull a newer version of the image | | `--push` | `bool` | | Push service images | | `-q`, `--quiet` | `bool` | | Don't print anything to STDOUT | @@ -34,9 +36,9 @@ run `docker compose build` to rebuild it. Services are built once and then tagged, by default as `project-service`. If the Compose file specifies an -[image](https://github.com/compose-spec/compose-spec/blob/master/spec.md#image) name, +[image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name, the image is tagged with that name, substituting any variables beforehand. See -[variable interpolation](https://github.com/compose-spec/compose-spec/blob/master/spec.md#interpolation). +[variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation). If you change a service's `Dockerfile` or the contents of its build directory, run `docker compose build` to rebuild it. diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md index 0eac3de63718..854eafe2168e 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_config.md @@ -5,20 +5,19 @@ It merges the Compose files set by `-f` flags, resolves variables in the Compose file, and expands short-notation into the canonical format. -### Aliases - -`docker compose config`, `docker compose convert` - ### Options | Name | Type | Default | Description | |:--------------------------|:---------|:--------|:----------------------------------------------------------------------------| | `--dry-run` | `bool` | | Execute command in dry run mode | | `--environment` | `bool` | | Print environment used for interpolation. | -| `--format` | `string` | `yaml` | Format the output. Values: [yaml \| json] | +| `--format` | `string` | | Format the output. Values: [yaml \| json] | | `--hash` | `string` | | Print the service config hash, one per line. | | `--images` | `bool` | | Print the image names, one per line. | +| `--lock-image-digests` | `bool` | | Produces an override file with image digests | +| `--networks` | `bool` | | Print the network names, one per line. 
| | `--no-consistency` | `bool` | | Don't check model consistency - warning: may produce invalid Compose output | +| `--no-env-resolution` | `bool` | | Don't resolve service env files | | `--no-interpolate` | `bool` | | Don't interpolate environment variables | | `--no-normalize` | `bool` | | Don't normalize compose model | | `--no-path-resolution` | `bool` | | Don't resolve file paths | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_create.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_create.md index b87cce8572b9..4b0b876da91d 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_create.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_create.md @@ -16,7 +16,7 @@ Creates containers for a service | `--quiet-pull` | `bool` | | Pull without printing progress information | | `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file | | `--scale` | `stringArray` | | Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. | -| `-y`, `--y` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively | +| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md index 754e91797113..7719d208609f 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_ls.md @@ -11,7 +11,7 @@ Lists running Compose projects | `--dry-run` | `bool` | | Execute command in dry run mode | | `--filter` | `filter` | | Filter output based on conditions provided | | `--format` | `string` | `table` | Format the output. Values: [table \| json] | -| `-q`, `--quiet` | `bool` | | Only display IDs | +| `-q`, `--quiet` | `bool` | | Only display project names | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md new file mode 100644 index 000000000000..8e5d181336b2 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_publish.md @@ -0,0 +1,18 @@ +# docker compose publish + + +Publish compose application + +### Options + +| Name | Type | Default | Description | +|:--------------------------|:---------|:--------|:-------------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--oci-version` | `string` | | OCI image/artifact specification version (automatically determined by default) | +| `--resolve-image-digests` | `bool` | | Pin image tags to digests | +| `--with-env` | `bool` | | Include environment variables in the published OCI artifact | +| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md index e9bc161efd71..e57f346a81a2 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_restart.md @@ -9,8 +9,8 @@ after a container is built, but before the container's command is executed) are after restarting. 
If you are looking to configure a service's restart policy, refer to -[restart](https://github.com/compose-spec/compose-spec/blob/master/spec.md#restart) -or [restart_policy](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#restart_policy). +[restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart) +or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy). ### Options @@ -33,5 +33,5 @@ after a container is built, but before the container's command is executed) are after restarting. If you are looking to configure a service's restart policy, refer to -[restart](https://github.com/compose-spec/compose-spec/blob/master/spec.md#restart) -or [restart_policy](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#restart_policy). +[restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart) +or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy). diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_run.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_run.md index f46d1872d2b5..25b28d1ded85 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_run.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_run.md @@ -66,6 +66,7 @@ specified in the service configuration. | `--dry-run` | `bool` | | Execute command in dry run mode | | `--entrypoint` | `string` | | Override the entrypoint of the image | | `-e`, `--env` | `stringArray` | | Set environment variables | +| `--env-from-file` | `stringArray` | | Set environment variables from file | | `-i`, `--interactive` | `bool` | `true` | Keep STDIN open even if not attached | | `-l`, `--label` | `stringArray` | | Add or override a label | | `--name` | `string` | | Assign a name to the container | @@ -73,6 +74,8 @@ specified in the service configuration. 
| `--no-deps` | `bool` | | Don't start linked services | | `-p`, `--publish` | `stringArray` | | Publish a container's port(s) to the host | | `--pull` | `string` | `policy` | Pull image before running ("always"\|"missing"\|"never") | +| `-q`, `--quiet` | `bool` | | Don't print anything to STDOUT | +| `--quiet-build` | `bool` | | Suppress progress output from the build process | | `--quiet-pull` | `bool` | | Pull without printing progress information | | `--remove-orphans` | `bool` | | Remove containers for services not defined in the Compose file | | `--rm` | `bool` | | Automatically remove the container when it exits | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md index 43d8bbc33d7b..78d44b89350d 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_stats.md @@ -5,13 +5,13 @@ Display a live stream of container(s) resource usage statistics ### Options -| Name | Type | Default | Description | -|:--------------|:---------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-a`, `--all` | `bool` | | Show all containers (default shows just running) | -| `--dry-run` | `bool` | | Execute command in dry run mode | -| `--format` | `string` | | Format output using a custom template:
'table': Print output in table format with column headers (default)
'table TEMPLATE': Print output in table format using the given Go template
'json': Print in JSON format
'TEMPLATE': Print output using the given Go template.
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates | -| `--no-stream` | `bool` | | Disable streaming stats and only pull the first result | -| `--no-trunc` | `bool` | | Do not truncate output | +| Name | Type | Default | Description | +|:--------------|:---------|:--------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `-a`, `--all` | `bool` | | Show all containers (default shows just running) | +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--format` | `string` | | Format output using a custom template:
'table': Print output in table format with column headers (default)
'table TEMPLATE': Print output in table format using the given Go template
'json': Print in JSON format
'TEMPLATE': Print output using the given Go template.
Refer to https://docs.docker.com/engine/cli/formatting/ for more information about formatting output with templates | +| `--no-stream` | `bool` | | Disable streaming stats and only pull the first result | +| `--no-trunc` | `bool` | | Do not truncate output | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md index d1478cdd3369..b831cb16d342 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_up.md @@ -53,7 +53,7 @@ If the process is interrupted using `SIGINT` (ctrl + C) or `SIGTERM`, the contai | `--wait` | `bool` | | Wait for services to be running\|healthy. Implies detached mode. | | `--wait-timeout` | `int` | `0` | Maximum duration in seconds to wait for the project to be running\|healthy | | `-w`, `--watch` | `bool` | | Watch source code and rebuild/refresh containers when files are updated. | -| `-y`, `--y` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively | +| `-y`, `--yes` | `bool` | | Assume "yes" as answer to all prompts and run non-interactively | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_volumes.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_volumes.md new file mode 100644 index 000000000000..6bad874f187b --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_volumes.md @@ -0,0 +1,16 @@ +# docker compose volumes + + +List volumes + +### Options + +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--dry-run` | `bool` | | Execute command in dry run mode | +| `--format` | `string` | `table` | Format output using a custom template:
'table': Print output in table format with column headers (default)
'table TEMPLATE': Print output in table format using the given Go template
'json': Print in JSON format
'TEMPLATE': Print output using the given Go template.
Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates | +| `-q`, `--quiet` | `bool` | | Only display volume names | + + + + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md b/_vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md index e2b4aef1a203..f6040c9094f2 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md +++ b/_vendor/github.com/docker/compose/v2/docs/reference/compose_watch.md @@ -9,7 +9,7 @@ Watch build context for service and rebuild/refresh containers when files are up |:------------|:-------|:--------|:----------------------------------------------| | `--dry-run` | `bool` | | Execute command in dry run mode | | `--no-up` | `bool` | | Do not build & start services before watching | -| `--prune` | `bool` | | Prune dangling images on rebuild | +| `--prune` | `bool` | `true` | Prune dangling images on rebuild | | `--quiet` | `bool` | | hide build output | diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml index 1c6fb4970e79..02a39d932326 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose.yaml @@ -6,6 +6,7 @@ pname: docker plink: docker.yaml cname: - docker compose attach + - docker compose bridge - docker compose build - docker compose commit - docker compose config @@ -22,6 +23,7 @@ cname: - docker compose pause - docker compose port - docker compose ps + - docker compose publish - docker compose pull - docker compose push - docker compose restart @@ -35,10 +37,12 @@ cname: - docker compose unpause - docker compose up - docker compose version + - docker compose volumes - docker compose wait - docker compose watch clink: - docker_compose_attach.yaml + - docker_compose_bridge.yaml - docker_compose_build.yaml - docker_compose_commit.yaml - docker_compose_config.yaml @@ -55,6 +59,7 @@ clink: - docker_compose_pause.yaml - docker_compose_port.yaml - docker_compose_ps.yaml + - docker_compose_publish.yaml - docker_compose_pull.yaml - docker_compose_push.yaml - docker_compose_restart.yaml @@ -68,6 +73,7 @@ clink: - docker_compose_unpause.yaml - docker_compose_up.yaml - docker_compose_version.yaml + - docker_compose_volumes.yaml - docker_compose_wait.yaml - docker_compose_watch.yaml options: @@ -165,7 +171,6 @@ options: swarm: false - option: progress value_type: string - default_value: auto description: Set type of progress output (auto, tty, plain, json, quiet) deprecated: false hidden: false @@ -229,7 +234,7 @@ options: swarm: false examples: |- ### Use `-f` to specify the name and path of one or more Compose files - Use the `-f` flag to specify the location of a Compose configuration file. + Use the `-f` flag to specify the location of a Compose [configuration file](/reference/compose-file/). #### Specifying multiple Compose files You can supply multiple `-f` configuration files. When you supply multiple files, Compose combines them into a single @@ -239,10 +244,10 @@ examples: |- For example, consider this command line: ```console - $ docker compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db + $ docker compose -f compose.yaml -f compose.admin.yaml run backup_db ``` - The `docker-compose.yml` file might specify a `webapp` service. + The `compose.yaml` file might specify a `webapp` service. 
```yaml services: @@ -253,7 +258,7 @@ examples: |- volumes: - "/data" ``` - If the `docker-compose.admin.yml` also specifies this same service, any matching fields override the previous file. + If the `compose.admin.yaml` also specifies this same service, any matching fields override the previous file. New values, add to the `webapp` service configuration. ```yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml index 0932af080ecc..f31429c2d725 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_generate.yaml @@ -45,7 +45,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml index 7a2da5ca92da..2c92249395c6 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_publish.yaml @@ -8,7 +8,7 @@ options: - option: oci-version value_type: string description: | - OCI Image/Artifact specification version (automatically determined by default) + OCI image/artifact specification version (automatically determined by default) deprecated: false hidden: false experimental: false @@ -25,6 +25,27 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: with-env + value_type: bool + default_value: "false" + description: Include environment variables in the published OCI artifact + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: "yes" + shorthand: "y" + value_type: bool + default_value: "false" + description: Assume "yes" as answer to all prompts + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: dry-run value_type: bool @@ -37,7 +58,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml index b179d648ef83..c07475caac8a 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_alpha_viz.yaml @@ -69,7 +69,7 @@ inherited_options: kubernetes: false swarm: false deprecated: false -hidden: false +hidden: true experimental: false experimentalcli: true kubernetes: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge.yaml new file mode 100644 index 000000000000..5ef9ebf55850 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge.yaml @@ -0,0 +1,29 @@ +command: docker compose bridge +short: Convert compose files into another model +long: Convert compose files into another model +pname: docker compose +plink: 
docker_compose.yaml +cname: + - docker compose bridge convert + - docker compose bridge transformations +clink: + - docker_compose_bridge_convert.yaml + - docker_compose_bridge_transformations.yaml +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_convert.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_convert.yaml new file mode 100644 index 000000000000..f55f0b233c3c --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_convert.yaml @@ -0,0 +1,59 @@ +command: docker compose bridge convert +short: | + Convert compose files to Kubernetes manifests, Helm charts, or another model +long: | + Convert compose files to Kubernetes manifests, Helm charts, or another model +usage: docker compose bridge convert +pname: docker compose bridge +plink: docker_compose_bridge.yaml +options: + - option: output + shorthand: o + value_type: string + default_value: out + description: The output directory for the Kubernetes resources + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: templates + value_type: string + description: Directory containing transformation templates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: transformation + shorthand: t + value_type: stringArray + default_value: '[]' + description: | + Transformation to apply to compose model (default: docker/compose-bridge-kubernetes) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations.yaml new file mode 100644 index 000000000000..2ab5661f0b2a --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations.yaml @@ -0,0 +1,29 @@ +command: docker compose bridge transformations +short: Manage transformation images +long: Manage transformation images +pname: docker compose bridge +plink: docker_compose_bridge.yaml +cname: + - docker compose bridge transformations create + - docker compose bridge transformations list +clink: + - docker_compose_bridge_transformations_create.yaml + - docker_compose_bridge_transformations_list.yaml +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: 
false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_create.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_create.yaml new file mode 100644 index 000000000000..e8dd9e58a51e --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_create.yaml @@ -0,0 +1,36 @@ +command: docker compose bridge transformations create +short: Create a new transformation +long: Create a new transformation +usage: docker compose bridge transformations create [OPTION] PATH +pname: docker compose bridge transformations +plink: docker_compose_bridge_transformations.yaml +options: + - option: from + shorthand: f + value_type: string + description: | + Existing transformation to copy (default: docker/compose-bridge-kubernetes) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_list.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_list.yaml new file mode 100644 index 000000000000..3afd3a84b8e7 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_bridge_transformations_list.yaml @@ -0,0 +1,47 @@ +command: docker compose bridge transformations list +aliases: docker compose bridge transformations list, docker compose bridge transformations ls +short: List available transformations +long: List available transformations +usage: docker compose bridge transformations list +pname: docker compose bridge transformations +plink: docker_compose_bridge_transformations.yaml +options: + - option: format + value_type: string + default_value: table + description: 'Format the output. Values: [table | json]' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display transformer names + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml index ade039dc4e34..6d1446a51012 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_build.yaml @@ -4,9 +4,9 @@ long: |- Services are built once and then tagged, by default as `project-service`. 
If the Compose file specifies an - [image](https://github.com/compose-spec/compose-spec/blob/master/spec.md#image) name, + [image](https://github.com/compose-spec/compose-spec/blob/main/spec.md#image) name, the image is tagged with that name, substituting any variables beforehand. See - [variable interpolation](https://github.com/compose-spec/compose-spec/blob/master/spec.md#interpolation). + [variable interpolation](https://github.com/compose-spec/compose-spec/blob/main/spec.md#interpolation). If you change a service's `Dockerfile` or the contents of its build directory, run `docker compose build` to rebuild it. @@ -33,6 +33,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: check + value_type: bool + default_value: "false" + description: Check build configuration + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: compress value_type: bool default_value: "true" @@ -96,9 +106,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: print + value_type: bool + default_value: "false" + description: Print equivalent bake file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: progress value_type: string - default_value: auto description: Set type of ui output (auto, tty, plain, json, quiet) deprecated: false hidden: true diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml index 8073d85ab6e0..b95c58c279a1 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_config.yaml @@ -1,5 +1,4 @@ command: docker compose config -aliases: docker compose config, docker compose convert short: Parse, resolve and render compose file in canonical format long: |- `docker compose config` renders the actual data model to be applied on the Docker Engine. @@ -21,7 +20,6 @@ options: swarm: false - option: format value_type: string - default_value: yaml description: 'Format the output. Values: [yaml | json]' deprecated: false hidden: false @@ -48,6 +46,26 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: lock-image-digests + value_type: bool + default_value: "false" + description: Produces an override file with image digests + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: networks + value_type: bool + default_value: "false" + description: Print the network names, one per line. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: no-consistency value_type: bool default_value: "false" @@ -59,6 +77,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: no-env-resolution + value_type: bool + default_value: "false" + description: Don't resolve service env files + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: no-interpolate value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml index 7cb764098b99..f6ab1b868244 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_create.yaml @@ -88,7 +88,7 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: "y" + - option: "yes" shorthand: "y" value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml index a2efac2a88de..dd6418c652f1 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_ls.yaml @@ -39,7 +39,7 @@ options: shorthand: q value_type: bool default_value: "false" - description: Only display IDs + description: Only display project names deprecated: false hidden: false experimental: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml new file mode 100644 index 000000000000..44a7a46dd421 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_publish.yaml @@ -0,0 +1,66 @@ +command: docker compose publish +short: Publish compose application +long: Publish compose application +usage: docker compose publish [OPTIONS] REPOSITORY[:TAG] +pname: docker compose +plink: docker_compose.yaml +options: + - option: oci-version + value_type: string + description: | + OCI image/artifact specification version (automatically determined by default) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: resolve-image-digests + value_type: bool + default_value: "false" + description: Pin image tags to digests + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: with-env + value_type: bool + default_value: "false" + description: Include environment variables in the published OCI artifact + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: "yes" + shorthand: "y" + value_type: bool + default_value: "false" + description: Assume "yes" as answer to all prompts + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false 
+experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml index 3b2a4bddd1b4..3bc0a3ad83ae 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_restart.yaml @@ -9,8 +9,8 @@ long: |- after restarting. If you are looking to configure a service's restart policy, refer to - [restart](https://github.com/compose-spec/compose-spec/blob/master/spec.md#restart) - or [restart_policy](https://github.com/compose-spec/compose-spec/blob/master/deploy.md#restart_policy). + [restart](https://github.com/compose-spec/compose-spec/blob/main/spec.md#restart) + or [restart_policy](https://github.com/compose-spec/compose-spec/blob/main/deploy.md#restart_policy). usage: docker compose restart [OPTIONS] [SERVICE...] pname: docker compose plink: docker_compose.yaml diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml index dc19a95de4b6..61c7ca0e8cbc 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_run.yaml @@ -117,6 +117,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: env-from-file + value_type: stringArray + default_value: '[]' + description: Set environment variables from file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: interactive shorthand: i value_type: bool @@ -190,6 +200,27 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Don't print anything to STDOUT + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet-build + value_type: bool + default_value: "false" + description: Suppress progress output from the build process + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: quiet-pull value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml index cd8e801b321f..e6854b05a259 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_stats.yaml @@ -24,7 +24,7 @@ options: 'table TEMPLATE': Print output in table format using the given Go template 'json': Print in JSON format 'TEMPLATE': Print output using the given Go template. 
- Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + Refer to https://docs.docker.com/engine/cli/formatting/ for more information about formatting output with templates deprecated: false hidden: false experimental: false diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml index 0ed31b3599ce..47e0c5259ebb 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_up.yaml @@ -309,7 +309,7 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: "y" + - option: "yes" shorthand: "y" value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_volumes.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_volumes.yaml new file mode 100644 index 000000000000..20516db7f137 --- /dev/null +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_volumes.yaml @@ -0,0 +1,52 @@ +command: docker compose volumes +short: List volumes +long: List volumes +usage: docker compose volumes [OPTIONS] [SERVICE...] +pname: docker compose +plink: docker_compose.yaml +options: + - option: format + value_type: string + default_value: table + description: |- + Format output using a custom template: + 'table': Print output in table format with column headers (default) + 'table TEMPLATE': Print output in table format using the given Go template + 'json': Print in JSON format + 'TEMPLATE': Print output using the given Go template. + Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display volume names + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: dry-run + value_type: bool + default_value: "false" + description: Execute command in dry run mode + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml index 454bf36342ad..a3e3e8022011 100644 --- a/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml +++ b/_vendor/github.com/docker/compose/v2/docs/reference/docker_compose_watch.yaml @@ -19,7 +19,7 @@ options: swarm: false - option: prune value_type: bool - default_value: "false" + default_value: "true" description: Prune dangling images on rebuild deprecated: false hidden: false diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model.yaml new file mode 100644 index 000000000000..873348e5c484 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model.yaml @@ -0,0 +1,48 @@ +command: docker model +short: Docker Model Runner +long: |- + Use Docker Model Runner to run and interact with AI models 
directly from the command line. + For more information, see the [documentation](/ai/model-runner/) +pname: docker +plink: docker.yaml +cname: + - docker model df + - docker model inspect + - docker model install-runner + - docker model list + - docker model logs + - docker model package + - docker model ps + - docker model pull + - docker model push + - docker model rm + - docker model run + - docker model status + - docker model tag + - docker model uninstall-runner + - docker model unload + - docker model version +clink: + - docker_model_df.yaml + - docker_model_inspect.yaml + - docker_model_install-runner.yaml + - docker_model_list.yaml + - docker_model_logs.yaml + - docker_model_package.yaml + - docker_model_ps.yaml + - docker_model_pull.yaml + - docker_model_push.yaml + - docker_model_rm.yaml + - docker_model_run.yaml + - docker_model_status.yaml + - docker_model_tag.yaml + - docker_model_uninstall-runner.yaml + - docker_model_unload.yaml + - docker_model_version.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose.yaml new file mode 100644 index 000000000000..79353c66aaae --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose.yaml @@ -0,0 +1,28 @@ +command: docker model compose +pname: docker model +plink: docker_model.yaml +cname: + - docker model compose down + - docker model compose metadata + - docker model compose up +clink: + - docker_model_compose_down.yaml + - docker_model_compose_metadata.yaml + - docker_model_compose_up.yaml +options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_down.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_down.yaml new file mode 100644 index 000000000000..9770b566a273 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_down.yaml @@ -0,0 +1,21 @@ +command: docker model compose down +usage: docker model compose down +pname: docker model compose +plink: docker_model_compose.yaml +inherited_options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_metadata.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_metadata.yaml new file mode 100644 index 000000000000..ae54bc67afec --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_metadata.yaml @@ -0,0 +1,23 @@ +command: docker model compose metadata +short: Metadata for Docker Compose +long: Metadata for Docker Compose +usage: docker model compose metadata +pname: docker model compose +plink: docker_model_compose.yaml +inherited_options: + - option: project-name + value_type: string + description: compose project name + 
deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_up.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_up.yaml new file mode 100644 index 000000000000..7a746d11f1ea --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_compose_up.yaml @@ -0,0 +1,61 @@ +command: docker model compose up +usage: docker model compose up +pname: docker model compose +plink: docker_model_compose.yaml +options: + - option: backend + value_type: string + default_value: llama.cpp + description: inference backend to use + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: context-size + value_type: int64 + default_value: "-1" + description: context size for the model + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: model + value_type: stringArray + default_value: '[]' + description: model to use + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: runtime-flags + value_type: string + description: raw runtime flags to pass to the inference engine + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: project-name + value_type: string + description: compose project name + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_configure.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_configure.yaml new file mode 100644 index 000000000000..e94cbc918ebc --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_configure.yaml @@ -0,0 +1,24 @@ +command: docker model configure +short: Configure runtime options for a model +long: Configure runtime options for a model +usage: docker model configure [--context-size=] MODEL [-- ] +pname: docker model +plink: docker_model.yaml +options: + - option: context-size + value_type: int64 + default_value: "-1" + description: context size (in tokens) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_df.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_df.yaml new file mode 100644 index 000000000000..f1b3fca07c0d --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_df.yaml @@ -0,0 +1,13 @@ +command: docker model df +short: Show Docker Model Runner disk usage +long: Show Docker Model Runner disk usage +usage: docker model df +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git 
a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_inspect.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_inspect.yaml new file mode 100644 index 000000000000..0684354c9bf9 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_inspect.yaml @@ -0,0 +1,35 @@ +command: docker model inspect +short: Display detailed information on one model +long: Display detailed information on one model +usage: docker model inspect MODEL +pname: docker model +plink: docker_model.yaml +options: + - option: openai + value_type: bool + default_value: "false" + description: List model in an OpenAI format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: remote + shorthand: r + value_type: bool + default_value: "false" + description: Show info for remote models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_install-runner.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_install-runner.yaml new file mode 100644 index 000000000000..bc4dc488979c --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_install-runner.yaml @@ -0,0 +1,45 @@ +command: docker model install-runner +short: Install Docker Model Runner (Docker Engine only) +long: | + This command runs implicitly when a docker model command is executed. You can run this command explicitly to add a new configuration. +usage: docker model install-runner +pname: docker model +plink: docker_model.yaml +options: + - option: do-not-track + value_type: bool + default_value: "false" + description: Do not track models usage in Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + value_type: string + default_value: auto + description: Specify GPU support (none|auto|cuda) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: port + value_type: uint16 + default_value: "12434" + description: Docker container port for Docker Model Runner + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_list.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_list.yaml new file mode 100644 index 000000000000..292704ade7c5 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_list.yaml @@ -0,0 +1,46 @@ +command: docker model list +aliases: docker model list, docker model ls +short: List the models pulled to your local environment +long: List the models pulled to your local environment +usage: docker model list [OPTIONS] +pname: docker model +plink: docker_model.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: List models in a JSON format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: openai + value_type: bool + default_value: "false" + 
description: List models in an OpenAI format + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only show model IDs + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_logs.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_logs.yaml new file mode 100644 index 000000000000..84a01f89e955 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_logs.yaml @@ -0,0 +1,35 @@ +command: docker model logs +short: Fetch the Docker Model Runner logs +long: Fetch the Docker Model Runner logs +usage: docker model logs [OPTIONS] +pname: docker model +plink: docker_model.yaml +options: + - option: follow + shorthand: f + value_type: bool + default_value: "false" + description: View logs with real-time streaming + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: no-engines + value_type: bool + default_value: "false" + description: Exclude inference engine logs from the output + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_package.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_package.yaml new file mode 100644 index 000000000000..532909a68150 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_package.yaml @@ -0,0 +1,56 @@ +command: docker model package +short: | + Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry +long: | + Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry +usage: docker model package --gguf [--license ...] 
[--context-size ] --push TARGET +pname: docker model +plink: docker_model.yaml +options: + - option: context-size + value_type: uint64 + default_value: "0" + description: context size in tokens + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gguf + value_type: string + description: absolute path to gguf file (required) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: license + shorthand: l + value_type: stringArray + default_value: '[]' + description: absolute path to a license file + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: push + value_type: bool + default_value: "false" + description: push to registry (required) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_ps.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_ps.yaml new file mode 100644 index 000000000000..54ac98561c36 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_ps.yaml @@ -0,0 +1,13 @@ +command: docker model ps +short: List running models +long: List running models +usage: docker model ps +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_pull.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_pull.yaml new file mode 100644 index 000000000000..f0843b020636 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_pull.yaml @@ -0,0 +1,33 @@ +command: docker model pull +short: Pull a model from Docker Hub or HuggingFace to your local environment +long: | + Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard. +usage: docker model pull MODEL +pname: docker model +plink: docker_model.yaml +examples: |- + ### Pulling a model from Docker Hub + + ```console + docker model pull ai/smollm2 + ``` + + ### Pulling from HuggingFace + + You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf). + + **Note about quantization:** If no tag is specified, the command tries to pull the `Q4_K_M` version of the model. + If `Q4_K_M` doesn't exist, the command pulls the first GGUF found in the **Files** view of the model on HuggingFace. 
+ To specify the quantization, provide it as a tag, for example: + `docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_S` + + ```console + docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF + ``` +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_push.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_push.yaml new file mode 100644 index 000000000000..4bd953bc0d8b --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_push.yaml @@ -0,0 +1,13 @@ +command: docker model push +short: Push a model to Docker Hub +long: Push a model to Docker Hub +usage: docker model push MODEL +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_rm.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_rm.yaml new file mode 100644 index 000000000000..426bfd88da80 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_rm.yaml @@ -0,0 +1,25 @@ +command: docker model rm +short: Remove local models downloaded from Docker Hub +long: Remove local models downloaded from Docker Hub +usage: docker model rm [MODEL...] +pname: docker model +plink: docker_model.yaml +options: + - option: force + shorthand: f + value_type: bool + default_value: "false" + description: Forcefully remove the model + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_run.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_run.yaml new file mode 100644 index 000000000000..4d18d3c6f6fb --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_run.yaml @@ -0,0 +1,57 @@ +command: docker model run +short: Run a model and interact with it using a submitted prompt or chat mode +long: |- + When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes). + + You do not have to use Docker model run before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on-demand, assuming it has been pulled and is locally available. + + You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab. +usage: docker model run MODEL [PROMPT] +pname: docker model +plink: docker_model.yaml +options: + - option: debug + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### One-time prompt + + ```console + docker model run ai/smollm2 "Hi" + ``` + + Output: + + ```console + Hello! How can I assist you today? + ``` + + ### Interactive chat + + ```console + docker model run ai/smollm2 + ``` + + Output: + + ```console + Interactive chat mode started. Type '/bye' to exit. 
+ > Hi + Hi there! It's SmolLM, AI assistant. How can I help you today? + > /bye + Chat session ended. + ``` +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_status.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_status.yaml new file mode 100644 index 000000000000..5b0c33b46972 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_status.yaml @@ -0,0 +1,25 @@ +command: docker model status +short: Check if the Docker Model Runner is running +long: | + Check whether the Docker Model Runner is running and displays the current inference engine. +usage: docker model status +pname: docker model +plink: docker_model.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: Format output in JSON + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_tag.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_tag.yaml new file mode 100644 index 000000000000..2aa0b35e58f4 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_tag.yaml @@ -0,0 +1,14 @@ +command: docker model tag +short: Tag a model +long: | + Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`. +usage: docker model tag SOURCE TARGET +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_uninstall-runner.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_uninstall-runner.yaml new file mode 100644 index 000000000000..33f601535538 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_uninstall-runner.yaml @@ -0,0 +1,34 @@ +command: docker model uninstall-runner +short: Uninstall Docker Model Runner +long: Uninstall Docker Model Runner +usage: docker model uninstall-runner +pname: docker model +plink: docker_model.yaml +options: + - option: images + value_type: bool + default_value: "false" + description: Remove docker/model-runner images + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: models + value_type: bool + default_value: "false" + description: Remove model storage volume + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_unload.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_unload.yaml new file mode 100644 index 000000000000..ba581bdcef55 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_unload.yaml @@ -0,0 +1,33 @@ +command: docker model unload +short: Unload running models +long: Unload running models +usage: docker model unload (MODEL [MODEL ...] 
[--backend BACKEND] | --all) +pname: docker model +plink: docker_model.yaml +options: + - option: all + value_type: bool + default_value: "false" + description: Unload all running models + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: backend + value_type: string + description: Optional backend to target + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/docker_model_version.yaml b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_version.yaml new file mode 100644 index 000000000000..e391942f6ad0 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/docker_model_version.yaml @@ -0,0 +1,13 @@ +command: docker model version +short: Show the Docker Model Runner version +long: Show the Docker Model Runner version +usage: docker model version +pname: docker model +plink: docker_model.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model.md b/_vendor/github.com/docker/model-cli/docs/reference/model.md new file mode 100644 index 000000000000..f79e25304ac4 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model.md @@ -0,0 +1,34 @@ +# docker model + + +Docker Model Runner (EXPERIMENTAL) + +### Subcommands + +| Name | Description | +|:------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------| +| [`df`](model_df.md) | Show Docker Model Runner disk usage | +| [`inspect`](model_inspect.md) | Display detailed information on one model | +| [`install-runner`](model_install-runner.md) | Install Docker Model Runner (Docker Engine only) | +| [`list`](model_list.md) | List the models pulled to your local environment | +| [`logs`](model_logs.md) | Fetch the Docker Model Runner logs | +| [`package`](model_package.md) | Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry | +| [`ps`](model_ps.md) | List running models | +| [`pull`](model_pull.md) | Pull a model from Docker Hub or HuggingFace to your local environment | +| [`push`](model_push.md) | Push a model to Docker Hub | +| [`rm`](model_rm.md) | Remove local models downloaded from Docker Hub | +| [`run`](model_run.md) | Run a model and interact with it using a submitted prompt or chat mode | +| [`status`](model_status.md) | Check if the Docker Model Runner is running | +| [`tag`](model_tag.md) | Tag a model | +| [`uninstall-runner`](model_uninstall-runner.md) | Uninstall Docker Model Runner | +| [`unload`](model_unload.md) | Unload running models | +| [`version`](model_version.md) | Show the Docker Model Runner version | + + + + + +## Description + +Use Docker Model Runner to run and interact with AI models directly from the command line. 
+For more information, see the [documentation](https://docs.docker.com/ai/model-runner/) diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_configure.md b/_vendor/github.com/docker/model-cli/docs/reference/model_configure.md new file mode 100644 index 000000000000..81fc1546bd5e --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_configure.md @@ -0,0 +1,14 @@ +# docker model configure + + +Configure runtime options for a model + +### Options + +| Name | Type | Default | Description | +|:-----------------|:--------|:--------|:-------------------------| +| `--context-size` | `int64` | `-1` | context size (in tokens) | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_df.md b/_vendor/github.com/docker/model-cli/docs/reference/model_df.md new file mode 100644 index 000000000000..e6a4073670b4 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_df.md @@ -0,0 +1,8 @@ +# docker model df + + +Show Docker Model Runner disk usage + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_inspect.md b/_vendor/github.com/docker/model-cli/docs/reference/model_inspect.md new file mode 100644 index 000000000000..7df015093814 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_inspect.md @@ -0,0 +1,15 @@ +# docker model inspect + + +Display detailed information on one model + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:-------------------------------| +| `--openai` | `bool` | | List model in an OpenAI format | +| `-r`, `--remote` | `bool` | | Show info for remote models | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_install-runner.md b/_vendor/github.com/docker/model-cli/docs/reference/model_install-runner.md new file mode 100644 index 000000000000..970a6976a42e --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_install-runner.md @@ -0,0 +1,19 @@ +# docker model install-runner + + +Install Docker Model Runner (Docker Engine only) + +### Options + +| Name | Type | Default | Description | +|:-----------------|:---------|:--------|:-------------------------------------------------| +| `--do-not-track` | `bool` | | Do not track models usage in Docker Model Runner | +| `--gpu` | `string` | `auto` | Specify GPU support (none\|auto\|cuda) | +| `--port` | `uint16` | `12434` | Docker container port for Docker Model Runner | + + + + +## Description + + This command runs implicitly when a docker model command is executed. You can run this command explicitly to add a new configuration. 
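+
+For example, to explicitly reinstall the runner with CUDA support on a custom port (a sketch; the flag values shown are illustrative, and both flags are optional):
+
+```console
+docker model install-runner --gpu cuda --port 12435
+```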
diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_list.md b/_vendor/github.com/docker/model-cli/docs/reference/model_list.md new file mode 100644 index 000000000000..b6c051f28dd0 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_list.md @@ -0,0 +1,20 @@ +# docker model list + + +List the models pulled to your local environment + +### Aliases + +`docker model list`, `docker model ls` + +### Options + +| Name | Type | Default | Description | +|:----------------|:-------|:--------|:--------------------------------| +| `--json` | `bool` | | List models in a JSON format | +| `--openai` | `bool` | | List models in an OpenAI format | +| `-q`, `--quiet` | `bool` | | Only show model IDs | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_logs.md b/_vendor/github.com/docker/model-cli/docs/reference/model_logs.md new file mode 100644 index 000000000000..8c5810924a18 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_logs.md @@ -0,0 +1,15 @@ +# docker model logs + + +Fetch the Docker Model Runner logs + +### Options + +| Name | Type | Default | Description | +|:-----------------|:-------|:--------|:----------------------------------------------| +| `-f`, `--follow` | `bool` | | View logs with real-time streaming | +| `--no-engines` | `bool` | | Exclude inference engine logs from the output | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_package.md b/_vendor/github.com/docker/model-cli/docs/reference/model_package.md new file mode 100644 index 000000000000..615535fd80dc --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_package.md @@ -0,0 +1,17 @@ +# docker model package + + +Package a GGUF file into a Docker model OCI artifact, with optional licenses, and pushes it to the specified registry + +### Options + +| Name | Type | Default | Description | +|:------------------|:--------------|:--------|:--------------------------------------| +| `--context-size` | `uint64` | `0` | context size in tokens | +| `--gguf` | `string` | | absolute path to gguf file (required) | +| `-l`, `--license` | `stringArray` | | absolute path to a license file | +| `--push` | `bool` | | push to registry (required) | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_ps.md b/_vendor/github.com/docker/model-cli/docs/reference/model_ps.md new file mode 100644 index 000000000000..15f5371553f6 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_ps.md @@ -0,0 +1,8 @@ +# docker model ps + + +List running models + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_pull.md b/_vendor/github.com/docker/model-cli/docs/reference/model_pull.md new file mode 100644 index 000000000000..246cc59d78af --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_pull.md @@ -0,0 +1,32 @@ +# docker model pull + + +Pull a model from Docker Hub or HuggingFace to your local environment + + + + +## Description + +Pull a model to your local environment. Downloaded models also appear in the Docker Desktop Dashboard. + +## Examples + +### Pulling a model from Docker Hub + +```console +docker model pull ai/smollm2 +``` + +### Pulling from HuggingFace + +You can pull GGUF models directly from [Hugging Face](https://huggingface.co/models?library=gguf). + +**Note about quantization:** If no tag is specified, the command tries to pull the `Q4_K_M` version of the model. 
+If `Q4_K_M` doesn't exist, the command pulls the first GGUF found in the **Files** view of the model on HuggingFace.
+To specify the quantization, provide it as a tag, for example:
+`docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_S`
+
+```console
+docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
+```
diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_push.md b/_vendor/github.com/docker/model-cli/docs/reference/model_push.md
new file mode 100644
index 000000000000..b50a425e84de
--- /dev/null
+++ b/_vendor/github.com/docker/model-cli/docs/reference/model_push.md
@@ -0,0 +1,13 @@
+# docker model push
+
+
+Push a model to Docker Hub
+
+
+
+
+### Example
+
+```console
+docker model push <namespace>/<model>
+```
diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_rm.md b/_vendor/github.com/docker/model-cli/docs/reference/model_rm.md
new file mode 100644
index 000000000000..6463903bd899
--- /dev/null
+++ b/_vendor/github.com/docker/model-cli/docs/reference/model_rm.md
@@ -0,0 +1,14 @@
+# docker model rm
+
+
+Remove local models downloaded from Docker Hub
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:-------|:--------|:----------------------------|
+| `-f`, `--force` | `bool` | | Forcefully remove the model |
+
+
+
diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_run.md b/_vendor/github.com/docker/model-cli/docs/reference/model_run.md
new file mode 100644
index 000000000000..3010f26c27ce
--- /dev/null
+++ b/_vendor/github.com/docker/model-cli/docs/reference/model_run.md
@@ -0,0 +1,51 @@
+# docker model run
+
+
+Run a model and interact with it using a submitted prompt or chat mode
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------|:-------|:--------|:---------------------|
+| `--debug` | `bool` | | Enable debug logging |
+
+
+
+
+## Description
+
+When you run a model, Docker calls an inference server API endpoint hosted by the Model Runner through Docker Desktop. The model stays in memory until another model is requested, or until a pre-defined inactivity timeout is reached (currently 5 minutes).
+
+You do not have to use Docker model run before interacting with a specific model from a host process or from within a container. Model Runner transparently loads the requested model on-demand, assuming it has been pulled and is locally available.
+
+You can also use chat mode in the Docker Desktop Dashboard when you select the model in the **Models** tab.
+
+## Examples
+
+### One-time prompt
+
+```console
+docker model run ai/smollm2 "Hi"
+```
+
+Output:
+
+```console
+Hello! How can I assist you today?
+```
+
+### Interactive chat
+
+```console
+docker model run ai/smollm2
+```
+
+Output:
+
+```console
+Interactive chat mode started. Type '/bye' to exit.
+> Hi
+Hi there! It's SmolLM, AI assistant. How can I help you today?
+> /bye
+Chat session ended.
+``` diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_status.md b/_vendor/github.com/docker/model-cli/docs/reference/model_status.md new file mode 100644 index 000000000000..baa630073db8 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_status.md @@ -0,0 +1,17 @@ +# docker model status + + +Check if the Docker Model Runner is running + +### Options + +| Name | Type | Default | Description | |:---------|:-------|:--------|:----------------------| | `--json` | `bool` | | Format output in JSON | + + + + +## Description + +Check whether the Docker Model Runner is running and display the current inference engine. diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_tag.md b/_vendor/github.com/docker/model-cli/docs/reference/model_tag.md new file mode 100644 index 000000000000..3f1615e296fc --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_tag.md @@ -0,0 +1,11 @@ +# docker model tag + + +Tag a model + + + + +## Description + +Specify a particular version or variant of the model. If no tag is provided, Docker defaults to `latest`. diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_uninstall-runner.md b/_vendor/github.com/docker/model-cli/docs/reference/model_uninstall-runner.md new file mode 100644 index 000000000000..3c4a79ceb295 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_uninstall-runner.md @@ -0,0 +1,15 @@ +# docker model uninstall-runner + + +Uninstall Docker Model Runner + +### Options + +| Name | Type | Default | Description | |:-----------|:-------|:--------|:----------------------------------| | `--images` | `bool` | | Remove docker/model-runner images | | `--models` | `bool` | | Remove model storage volume | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_unload.md b/_vendor/github.com/docker/model-cli/docs/reference/model_unload.md new file mode 100644 index 000000000000..70d7f8f2884c --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_unload.md @@ -0,0 +1,15 @@ +# docker model unload + + +Unload running models + +### Options + +| Name | Type | Default | Description | |:------------|:---------|:--------|:---------------------------| | `--all` | `bool` | | Unload all running models | | `--backend` | `string` | | Optional backend to target | + + + + diff --git a/_vendor/github.com/docker/model-cli/docs/reference/model_version.md b/_vendor/github.com/docker/model-cli/docs/reference/model_version.md new file mode 100644 index 000000000000..eb32c61fd979 --- /dev/null +++ b/_vendor/github.com/docker/model-cli/docs/reference/model_version.md @@ -0,0 +1,8 @@ +# docker model version + + +Show the Docker Model Runner version + + + + diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout.yaml index 4204e61c52bc..8dbe2951d677 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout.yaml @@ -20,6 +20,7 @@ cname: - docker scout recommendations - docker scout repo - docker scout version + - docker scout watch clink: - docker_scout_attestation.yaml - docker_scout_cache.yaml - docker_scout_config.yaml - docker_scout_cves.yaml - docker_scout_enroll.yaml - docker_scout_environment.yaml - docker_scout_help.yaml - docker_scout_integration.yaml - docker_scout_policy.yaml - docker_scout_push.yaml - docker_scout_quickview.yaml - docker_scout_recommendations.yaml - docker_scout_repo.yaml - docker_scout_version.yaml + - docker_scout_watch.yaml options: - option: debug value_type: bool diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml
b/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml index f6850825358b..46a6b2a140c9 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_attestation_add.yaml @@ -16,6 +16,15 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: org + value_type: string + description: Namespace of the Docker organization + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: predicate-type value_type: string description: Predicate-type for attestations @@ -25,6 +34,26 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: referrer + value_type: bool + default_value: "false" + description: Use OCI referrer API for pushing attestation + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: referrer-repository + value_type: string + default_value: registry.scout.docker.com + description: Repository to push referrer to + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: debug value_type: bool diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_compare.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_compare.yaml index a6a986c0a8d7..efd7ecdf8131 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_compare.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_compare.yaml @@ -55,7 +55,7 @@ options: value_type: stringSlice default_value: '[]' description: | - Comma separated list of conditions to fail the action step if worse, options are: vulnerability, policy + Comma separated list of conditions to fail the action step if worse or changed, options are: vulnerability, policy, package deprecated: false hidden: false experimental: false diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_cves.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_cves.yaml index 896c3b4d0bb4..eaef8f8a1b7b 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_cves.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_cves.yaml @@ -135,6 +135,25 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: local + value_type: bool + default_value: "false" + description: Local mode + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: local-vulndb + value_type: string + description: Local vulnerability database + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: locations value_type: bool default_value: "false" diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_integration_list.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_integration_list.yaml index 00db4ef6877e..9c2a5492060d 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_integration_list.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_integration_list.yaml @@ -1,5 +1,5 @@ command: docker scout integration list -short: Integration Docker Scout +short: List integrations which can be installed long: | The docker scout integration list configured integrations for an organization. 
usage: docker scout integration list [INTEGRATION] diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_push.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_push.yaml index b48952c9613a..361b89e3f894 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_push.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_push.yaml @@ -63,6 +63,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: secrets + value_type: bool + default_value: "false" + description: Scan for secrets in the image + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: timestamp value_type: string description: Timestamp of image or tag creation diff --git a/_vendor/github.com/docker/scout-cli/docs/docker_scout_sbom.yaml b/_vendor/github.com/docker/scout-cli/docs/docker_scout_sbom.yaml index 7a804e37e9d9..d51d5d5d2cbb 100644 --- a/_vendor/github.com/docker/scout-cli/docs/docker_scout_sbom.yaml +++ b/_vendor/github.com/docker/scout-cli/docs/docker_scout_sbom.yaml @@ -93,6 +93,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: secrets + value_type: bool + default_value: "false" + description: Scan for secrets in the image + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: debug value_type: bool diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md b/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md index 5f09c0fffda7..5517741c1667 100644 --- a/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md +++ b/_vendor/github.com/docker/scout-cli/docs/scout_attestation_add.md @@ -9,10 +9,13 @@ Add attestation to image ### Options -| Name | Type | Default | Description | -|:-------------------|:--------------|:--------|:----------------------------------------| -| `--file` | `stringSlice` | | File location of attestations to attach | -| `--predicate-type` | `string` | | Predicate-type for attestations | +| Name | Type | Default | Description | +|:------------------------|:--------------|:----------------------------|:---------------------------------------------| +| `--file` | `stringSlice` | | File location of attestations to attach | +| `--org` | `string` | | Namespace of the Docker organization | +| `--predicate-type` | `string` | | Predicate-type for attestations | +| `--referrer` | | | Use OCI referrer API for pushing attestation | +| `--referrer-repository` | `string` | `registry.scout.docker.com` | Repository to push referrer to | diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_compare.md b/_vendor/github.com/docker/scout-cli/docs/scout_compare.md index f25aa8635501..569dab660df6 100644 --- a/_vendor/github.com/docker/scout-cli/docs/scout_compare.md +++ b/_vendor/github.com/docker/scout-cli/docs/scout_compare.md @@ -11,7 +11,7 @@ Compare two images and display differences (experimental) | Name | Type | Default | Description | |:----------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-x`, `--exit-on` | `stringSlice` | | Comma separated list of conditions to fail the action step if worse, options are: vulnerability, policy | +| `-x`, `--exit-on` | `stringSlice` | | Comma separated list of conditions to fail the action step 
if worse or changed, options are: vulnerability, policy, package | | `--format` | `string` | `text` | Output format of the generated vulnerability report:
- text: default output, plain text with or without colors depending on the terminal
- markdown: Markdown output
| | `--hide-policies` | | | Hide policy status from the output | | `--ignore-base` | | | Filter out CVEs introduced from base image | diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration.md index 9a2def3a0b8f..6e2a5cff1f2c 100644 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration.md +++ b/_vendor/github.com/docker/scout-cli/docs/scout_integration.md @@ -9,9 +9,8 @@ Commands to list, configure, and delete Docker Scout integrations |:----------------------------------------------|:----------------------------------------------------| | [`configure`](scout_integration_configure.md) | Configure or update a new integration configuration | | [`delete`](scout_integration_delete.md) | Delete a new integration configuration | -| [`list`](scout_integration_list.md) | Integration Docker Scout | +| [`list`](scout_integration_list.md) | List integrations which can be installed | - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md b/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md index 67b39c59fc57..5e906f9210ee 100644 --- a/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md +++ b/_vendor/github.com/docker/scout-cli/docs/scout_integration_list.md @@ -1,7 +1,7 @@ # docker scout integration list -Integration Docker Scout +List integrations which can be installed ### Options @@ -12,4 +12,3 @@ Integration Docker Scout - diff --git a/_vendor/github.com/docker/scout-cli/docs/scout_push.md b/_vendor/github.com/docker/scout-cli/docs/scout_push.md index 09e3397e5c76..3e97c6be94c8 100644 --- a/_vendor/github.com/docker/scout-cli/docs/scout_push.md +++ b/_vendor/github.com/docker/scout-cli/docs/scout_push.md @@ -13,6 +13,7 @@ Push an image or image index to Docker Scout | `-o`, `--output` | `string` | | Write the report to a file | | `--platform` | `string` | | Platform of image to be pushed | | `--sbom` | | | Create and upload SBOMs | +| `--secrets` | | | Scan for secrets in the image | | `--timestamp` | `string` | | Timestamp of image or tag creation | diff --git a/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md b/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md index 08b692df3d3b..0053e1e12d28 100644 --- a/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md +++ b/_vendor/github.com/moby/buildkit/docs/attestations/slsa-definitions.md @@ -2,22 +2,436 @@ title: SLSA definitions --- -BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for builds that -it runs. +BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for +builds that it runs. The provenance format generated by BuildKit is defined by the -[SLSA Provenance format](https://slsa.dev/provenance/v0.2). +SLSA Provenance format (supports both [v0.2](https://slsa.dev/spec/v0.2/provenance) +and [v1](https://slsa.dev/spec/v1.1/provenance)). This page describes how BuildKit populate each field, and whether the field gets included when you generate attestations `mode=min` and `mode=max`. -## `builder.id` +## SLSA v1 -Corresponds to [SLSA `builder.id`](https://slsa.dev/provenance/v0.2#builder.id). +### `buildDefinition.buildType` + +* Ref: https://slsa.dev/spec/v1.1/provenance#buildType +* Included with `mode=min` and `mode=max`. 
+ +The `buildDefinition.buildType` field is set to `https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md` +and can be used to determine the structure of the provenance content. + +```json + "buildDefinition": { + "buildType": "https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md", + ... + } +``` + +### `buildDefinition.externalParameters.configSource` + +* Ref: https://slsa.dev/spec/v1.1/provenance#externalParameters +* Included with `mode=min` and `mode=max`. + +Describes the config that initialized the build. + +```json + "buildDefinition": { + "externalParameters": { + "configSource": { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + }, + "path": "Dockerfile" + }, + ... + }, + } +``` + +For builds initialized from a remote context, like a Git or HTTP URL, this +object defines the context URL and its immutable digest in the `uri` and +`digest` fields. For builds using a local frontend, such as a Dockerfile, the +`path` field defines the path for the frontend file that initialized the build +(`filename` frontend option). + +### `buildDefinition.externalParameters.request` + +* Ref: https://slsa.dev/spec/v1.1/provenance#externalParameters +* Partially included with `mode=min`. + +Describes build inputs passed to the build. + +```json + "buildDefinition": { + "externalParameters": { + "request": { + "frontend": "gateway.v0", + "args": { + "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR": "1", + "label:FOO": "bar", + "source": "docker/dockerfile-upstream:master", + "target": "release" + }, + "secrets": [ + { + "id": "GIT_AUTH_HEADER", + "optional": true + }, + ... + ], + "ssh": [], + "locals": [] + }, + ... + }, + } +``` + +The following fields are included with both `mode=min` and `mode=max`: + +- `locals` lists any local sources used in the build, including the build + context and frontend file. +- `frontend` defines type of BuildKit frontend used for the build. Currently, + this can be `dockerfile.v0` or `gateway.v0`. +- `args` defines the build arguments passed to the BuildKit frontend. + + The keys inside the `args` object reflect the options as BuildKit receives + them. For example, `build-arg` and `label` prefixes are used for build + arguments and labels, and `target` key defines the target stage that was + built. The `source` key defines the source image for the Gateway frontend, if + used. + +The following fields are only included with `mode=max`: + +- `secrets` defines secrets used during the build. Note that actual secret + values are not included. +- `ssh` defines the ssh forwards used during the build. + +### `buildDefinition.internalParameters.buildConfig` + +* Ref: https://slsa.dev/spec/v1.1/provenance#internalParameters +* Only included with `mode=max`. + +Defines the build steps performed during the build. + +BuildKit internally uses LLB definition to execute the build steps. The LLB +definition of the build steps is defined in the +`buildDefinition.internalParameters.buildConfig.llbDefinition` field. + +Each LLB step is the JSON definition of the +[LLB ProtoBuf API](https://github.com/moby/buildkit/blob/v0.10.0/solver/pb/ops.proto). +The dependencies for a vertex in the LLB graph can be found in the `inputs` +field for every step. 
+ +```json + "buildDefinition": { + "internalParameters": { + "buildConfig": { + "llbDefinition": [ + { + "id": "step0", + "op": { + "Op": { + "exec": { + "meta": { + "args": [ + "/bin/sh", + "-c", + "go build ." + ], + "env": [ + "PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "GOPATH=/go", + "GOFLAGS=-mod=vendor", + ], + "cwd": "/src", + }, + "mounts": [...] + } + }, + "platform": {...}, + }, + "inputs": [ + "step8:0", + "step2:0", + ] + }, + ... + ] + }, + } + } +``` + +### `buildDefinition.internalParameters.builderPlatform` + +* Ref: https://slsa.dev/spec/v1.1/provenance#internalParameters +* Included with `mode=min` and `mode=max`. + +```json + "buildDefinition": { + "internalParameters": { + "builderPlatform": "linux/amd64" + ... + }, + } +``` + +BuildKit sets the `builderPlatform` of the build machine. Note that this is not +necessarily the platform of the build result that can be determined from the +`in-toto` subject field. + +### `buildDefinition.resolvedDependencies` + +* Ref: https://slsa.dev/spec/v1.1/provenance#resolvedDependencies +* Included with `mode=min` and `mode=max`. + +Defines all the external artifacts that were part of the build. The value +depends on the type of artifact: + +- The URL of Git repositories containing source code for the image +- HTTP URLs if you are building from a remote tarball, or that was included + using an `ADD` command in Dockerfile +- Any Docker images used during the build + +The URLs to the Docker images will be in +[Package URL](https://github.com/package-url/purl-spec) format. + +All the build materials will include the immutable checksum of the artifact. +When building from a mutable tag, you can use the digest information to +determine if the artifact has been updated compared to when the build ran. + +```json + "buildDefinition": { + "resolvedDependencies": [ + { + "uri": "pkg:docker/alpine@3.17?platform=linux%2Famd64", + "digest": { + "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" + } + }, + { + "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0", + "digest": { + "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0" + } + }, + ... + ], + ... + } +``` + +### `runDetails.builder.id` + +* Ref: https://slsa.dev/spec/v1.1/provenance#builder.id +* Included with `mode=min` and `mode=max`. + +The field is set to the URL of the build, if available. + +```json + "runDetails": { + "builder": { + "id": "https://github.com/docker/buildx/actions/runs/3709599520" + ... + }, + ... + } +``` + +> [!NOTE] +> This value can be set using the `builder-id` attestation parameter. + +### `runDetails.metadata.invocationID` + +* Ref: https://slsa.dev/spec/v1.1/provenance#invocationId +* Included with `mode=min` and `mode=max`. + +Unique identifier for the build invocation. When building a multi-platform image +with a single build request, this value will be the shared by all the platform +versions of the image. + +```json + "runDetails": { + "metadata": { + "invocationID": "rpv7a389uzil5lqmrgwhijwjz", + ... + }, + ... + } +``` + +### `runDetails.metadata.startedOn` + +* Ref: https://slsa.dev/spec/v1.1/provenance#startedOn +* Included with `mode=min` and `mode=max`. + +Timestamp when the build started. + +```json + "runDetails": { + "metadata": { + "startedOn": "2021-11-17T15:00:00Z", + ... + }, + ... + } +``` + +### `runDetails.metadata.finishedOn` + +* Ref: https://slsa.dev/spec/v1.1/provenance#finishedOn +* Included with `mode=min` and `mode=max`. 
+ +Timestamp when the build finished. + +```json + "runDetails": { + "metadata": { + "finishedOn": "2021-11-17T15:01:00Z", + ... + }, + } +``` + +### `runDetails.metadata.buildkit_metadata` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Partially included with `mode=min`. + +This extension field defines BuildKit-specific additional metadata that is not +part of the SLSA provenance spec. + +```json + "runDetails": { + "metadata": { + "buildkit_metadata": { + "source": {...}, + "layers": {...}, + "vcs": {...}, + }, + ... + }, + } +``` + +#### `source` + +Only included with `mode=max`. + +Defines a source mapping of LLB build steps, defined in the +`buildDefinition.internalParameters.buildConfig.llbDefinition` field, to their +original source code (for example, Dockerfile commands). The `source.locations` +field contains the ranges of all the Dockerfile commands ran in an LLB step. +`source.infos` array contains the source code itself. This mapping is present +if the BuildKit frontend provided it when creating the LLB definition. + +#### `layers` + +Only included with `mode=max`. + +Defines the layer mapping of LLB build step mounts defined in +`buildDefinition.internalParameters.buildConfig.llbDefinition` to the OCI +descriptors of equivalent layers. This mapping is present if the layer data was +available, usually when attestation is for an image or if the build step pulled +in image data as part of the build. + +#### `vcs` Included with `mode=min` and `mode=max`. -The `builder.id` field is set to the URL of the build, if available. +Defines optional metadata for the version control system used for the build. If +a build uses a remote context from Git repository, BuildKit extracts the details +of the version control system automatically and displays it in the +`buildDefinition.externalParameters.configSource` field. But if the build uses +a source from a local directory, the VCS information is lost even if the +directory contained a Git repository. In this case, the build client can send +additional `vcs:source` and `vcs:revision` build options and BuildKit will add +them to the provenance attestations as extra metadata. Note that, contrary to +the `buildDefinition.externalParameters.configSource` field, BuildKit doesn't +verify the `vcs` values, and as such they can't be trusted and should only be +used as a metadata hint. + +### `runDetails.metadata.buildkit_hermetic` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field is set to true if the build was hermetic and did not access +the network. In Dockerfiles, a build is hermetic if it does not use `RUN` +commands or disables network with `--network=none` flag. + +```json + "runDetails": { + "metadata": { + "buildkit_hermetic": true, + ... + }, + } +``` + +### `runDetails.metadata.buildkit_completeness` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field defines if the provenance information is complete. It is +similar to `metadata.completeness` field in SLSA v0.2. + +`buildkit_completeness.request` is true if all the build arguments are included +in the `buildDefinition.externalParameters.request` field. When building with +`min` mode, the build arguments are not included in the provenance information +and request is not complete. Request is also not complete on direct LLB builds +that did not use a frontend. 
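As an illustrative, non-authoritative sketch (the image name `example/app` is a placeholder), you choose the provenance mode, and with it how complete these fields are, at build time, and you can read the generated attestation back with `imagetools`:

```console
docker buildx build --provenance=mode=max --tag example/app:latest --push .
docker buildx imagetools inspect example/app:latest --format '{{ json .Provenance }}'
```

Building with `--provenance=mode=min` instead yields the reduced field set described on this page.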
+ +`buildkit_completeness.resolvedDependencies` is true if +`buildDefinition.resolvedDependencies` field includes all the dependencies of +the build. When building from un-tracked source in a local directory, the +dependencies are not complete, while when building from a remote Git repository +all dependencies can be tracked by BuildKit and +`buildkit_completeness.resolvedDependencies` is true. + +```json + "runDetails": { + "metadata": { + "buildkit_completeness": { + "request": true, + "resolvedDependencies": true + }, + ... + }, + } +``` + +### `runDetails.metadata.buildkit_reproducible` + +* Ref: https://slsa.dev/spec/v1.1/provenance#extension-fields +* Included with `mode=min` and `mode=max`. + +This extension field defines if the build result is supposed to be byte-by-byte +reproducible. It is similar to `metadata.reproducible` field in SLSA v0.2. This +value can be set by the user with the `reproducible=true` attestation parameter. + +```json + "runDetails": { + "metadata": { + "buildkit_reproducible": false, + ... + }, + } +``` + +## SLSA v0.2 + +### `builder.id` + +* Ref: https://slsa.dev/spec/v0.2/provenance#builder.id +* Included with `mode=min` and `mode=max`. + +The field is set to the URL of the build, if available. ```json "builder": { @@ -25,26 +439,25 @@ The `builder.id` field is set to the URL of the build, if available. }, ``` -This value can be set using the `builder-id` attestation parameter. - -## `buildType` +> [!NOTE] +> This value can be set using the `builder-id` attestation parameter. -Corresponds to [SLSA `buildType`](https://slsa.dev/provenance/v0.2#buildType). +### `buildType` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildType +* Included with `mode=min` and `mode=max`. -The `buildType` field is set to `https://mobyproject.org/buildkit@v1` can be +The `buildType` field is set to `https://mobyproject.org/buildkit@v1` and can be used to determine the structure of the provenance content. ```json "buildType": "https://mobyproject.org/buildkit@v1", ``` -## `invocation.configSource` +### `invocation.configSource` -Corresponds to [SLSA `invocation.configSource`](https://slsa.dev/provenance/v0.2#invocation.configSource). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.configSource +* Included with `mode=min` and `mode=max`. Describes the config that initialized the build. @@ -62,15 +475,15 @@ Describes the config that initialized the build. ``` For builds initialized from a remote context, like a Git or HTTP URL, this -object defines the context URL and its immutable digest in the `uri` and `digest` fields. -For builds using a local frontend, such as a Dockerfile, the `entryPoint` field defines the path -for the frontend file that initialized the build (`filename` frontend option). +object defines the context URL and its immutable digest in the `uri` and +`digest` fields. For builds using a local frontend, such as a Dockerfile, the +`entryPoint` field defines the path for the frontend file that initialized the +build (`filename` frontend option). -## `invocation.parameters` +### `invocation.parameters` -Corresponds to [SLSA `invocation.parameters`](https://slsa.dev/provenance/v0.2#invocation.parameters). - -Partially included with `mode=min`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.parameters +* Partially included with `mode=min`. Describes build inputs passed to the build. 
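As a hedged sketch of how CLI flags surface as build inputs (the image name and values are placeholders), a build such as the following:

```console
docker buildx build \
  --provenance=mode=max \
  --build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 \
  --label FOO=bar \
  --target release \
  --tag example/app:latest .
```

is reported with `build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR`, `label:FOO`, and `target` keys in the `args` object, following the key prefixes described for this field.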
@@ -118,11 +531,10 @@ The following fields are only included with `mode=max`: values are not included. - `ssh` defines the ssh forwards used during the build. -## `invocation.environment` +### `invocation.environment` -Corresponds to [SLSA `invocation.environment`](https://slsa.dev/provenance/v0.2#invocation.environment). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#invocation.environment +* Included with `mode=min` and `mode=max`. ```json "invocation": { @@ -137,11 +549,10 @@ The only value BuildKit currently sets is the `platform` of the current build machine. Note that this is not necessarily the platform of the build result that can be determined from the `in-toto` subject field. -## `materials` - -Corresponds to [SLSA `materials`](https://slsa.dev/provenance/v0.2#materials). +### `materials` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#materials +* Included with `mode=min` and `mode=max`. Defines all the external artifacts that were part of the build. The value depends on the type of artifact: @@ -176,11 +587,10 @@ determine if the artifact has been updated compared to when the build ran. ], ``` -## `buildConfig` +### `buildConfig` -Corresponds to [SLSA `buildConfig`](https://slsa.dev/provenance/v0.2#buildConfig). - -Only included with `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildConfig +* Only included with `mode=max`. Defines the build steps performed during the build. @@ -228,11 +638,10 @@ field for every step. }, ``` -## `metadata.buildInvocationId` +### `metadata.buildInvocationId` -Corresponds to [SLSA `metadata.buildInvocationId`](https://slsa.dev/provenance/v0.2#metadata.buildIncocationId). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildInvocationId +* Included with `mode=min` and `mode=max`. Unique identifier for the build invocation. When building a multi-platform image with a single build request, this value will be the shared by all the platform @@ -245,11 +654,10 @@ versions of the image. }, ``` -## `metadata.buildStartedOn` +### `metadata.buildStartedOn` -Corresponds to [SLSA `metadata.buildStartedOn`](https://slsa.dev/provenance/v0.2#metadata.buildStartedOn). - -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildStartedOn +* Included with `mode=min` and `mode=max`. Timestamp when the build started. @@ -260,11 +668,10 @@ Timestamp when the build started. }, ``` -## `metadata.buildFinishedOn` - -Corresponds to [SLSA `metadata.buildFinishedOn`](https://slsa.dev/provenance/v0.2#metadata.buildFinishedOn). +### `metadata.buildFinishedOn` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#buildFinishedOn +* Included with `mode=min` and `mode=max`. Timestamp when the build finished. @@ -275,19 +682,18 @@ Timestamp when the build finished. }, ``` -## `metadata.completeness` - -Corresponds to [SLSA `metadata.completeness`](https://slsa.dev/provenance/v0.2#metadata.completeness). +### `metadata.completeness` -Included with `mode=min` and `mode=max`. +* Ref: https://slsa.dev/spec/v0.2/provenance#metadata.completeness +* Included with `mode=min` and `mode=max`. Defines if the provenance information is complete. `completeness.parameters` is true if all the build arguments are included in the -`invocation.parameters` field. When building with `min` mode, the build -arguments are not included in the provenance information and parameters are not -complete. 
Parameters are also not complete on direct LLB builds that did not use -a frontend. +`parameters` field. When building with `min` mode, the build arguments are not +included in the provenance information and parameters are not complete. +Parameters are also not complete on direct LLB builds that did not use a +frontend. `completeness.environment` is always true for BuildKit builds. @@ -308,9 +714,10 @@ is true. }, ``` -## `metadata.reproducible` +### `metadata.reproducible` -Corresponds to [SLSA `metadata.reproducible`](https://slsa.dev/provenance/v0.2#metadata.reproducible). +* Ref: https://slsa.dev/spec/v0.2/provenance#metadata.reproducible +* Included with `mode=min` and `mode=max`. Defines if the build result is supposed to be byte-by-byte reproducible. This value can be set by the user with the `reproducible=true` attestation parameter. @@ -322,7 +729,7 @@ value can be set by the user with the `reproducible=true` attestation parameter. }, ``` -## `metadata.https://mobyproject.org/buildkit@v1#hermetic` +### `metadata.https://mobyproject.org/buildkit@v1#hermetic` Included with `mode=min` and `mode=max`. @@ -337,7 +744,7 @@ commands or disables network with `--network=none` flag. }, ``` -## `metadata.https://mobyproject.org/buildkit@v1#metadata` +### `metadata.https://mobyproject.org/buildkit@v1#metadata` Partially included with `mode=min`. @@ -355,7 +762,7 @@ part of the SLSA provenance spec. }, ``` -### `source` +#### `source` Only included with `mode=max`. @@ -366,7 +773,7 @@ the Dockerfile commands ran in an LLB step. `source.infos` array contains the source code itself. This mapping is present if the BuildKit frontend provided it when creating the LLB definition. -### `layers` +#### `layers` Only included with `mode=max`. @@ -375,7 +782,7 @@ Defines the layer mapping of LLB build step mounts defined in mapping is present if the layer data was available, usually when attestation is for an image or if the build step pulled in image data as part of the build. -### `vcs` +#### `vcs` Included with `mode=min` and `mode=max`. @@ -389,227 +796,3 @@ repository. In this case, the build client can send additional `vcs:source` and attestations as extra metadata. Note that, contrary to the `invocation.configSource` field, BuildKit doesn't verify the `vcs` values, and as such they can't be trusted and should only be used as a metadata hint. - -## Output - -To inspect the provenance that was generated and attached to a container image, -you can use the `docker buildx imagetools` command to inspect the image in a -registry. Inspecting the attestation displays the format described in the -[attestation storage specification](./attestation-storage.md). 
- -For example, inspecting a simple Docker image based on `alpine:latest` results -in a provenance attestation similar to the following, for a `mode=min` build: - -```json -{ - "_type": "https://in-toto.io/Statement/v0.1", - "predicateType": "https://slsa.dev/provenance/v0.2", - "subject": [ - { - "name": "pkg:docker//@?platform=", - "digest": { - "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" - } - } - ], - "predicate": { - "builder": { - "id": "" - }, - "buildType": "https://mobyproject.org/buildkit@v1", - "materials": [ - { - "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", - "digest": { - "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - } - ], - "invocation": { - "configSource": { - "entryPoint": "Dockerfile" - }, - "parameters": { - "frontend": "dockerfile.v0", - "args": {}, - "locals": [ - { - "name": "context" - }, - { - "name": "dockerfile" - } - ] - }, - "environment": { - "platform": "linux/amd64" - } - }, - "metadata": { - "buildInvocationID": "yirbp1aosi1vqjmi3z6bc75nb", - "buildStartedOn": "2022-12-08T11:48:59.466513707Z", - "buildFinishedOn": "2022-12-08T11:49:01.256820297Z", - "reproducible": false, - "completeness": { - "parameters": true, - "environment": true, - "materials": false - }, - "https://mobyproject.org/buildkit@v1#metadata": {} - } - } -} -``` - -For a similar build, but with `mode=max`: - -```json -{ - "_type": "https://in-toto.io/Statement/v0.1", - "predicateType": "https://slsa.dev/provenance/v0.2", - "subject": [ - { - "name": "pkg:docker//@?platform=", - "digest": { - "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862" - } - } - ], - "predicate": { - "builder": { - "id": "" - }, - "buildType": "https://mobyproject.org/buildkit@v1", - "materials": [ - { - "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64", - "digest": { - "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - } - ], - "invocation": { - "configSource": { - "entryPoint": "Dockerfile" - }, - "parameters": { - "frontend": "dockerfile.v0", - "args": {}, - "locals": [ - { - "name": "context" - }, - { - "name": "dockerfile" - } - ] - }, - "environment": { - "platform": "linux/amd64" - } - }, - "buildConfig": { - "llbDefinition": [ - { - "id": "step0", - "op": { - "Op": { - "source": { - "identifier": "docker-image://docker.io/library/alpine:latest@sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4" - } - }, - "platform": { - "Architecture": "amd64", - "OS": "linux" - }, - "constraints": {} - } - }, - { - "id": "step1", - "op": { - "Op": null - }, - "inputs": ["step0:0"] - } - ] - }, - "metadata": { - "buildInvocationID": "46ue2x93k3xj5l463dektwldw", - "buildStartedOn": "2022-12-08T11:50:54.953375437Z", - "buildFinishedOn": "2022-12-08T11:50:55.447841328Z", - "reproducible": false, - "completeness": { - "parameters": true, - "environment": true, - "materials": false - }, - "https://mobyproject.org/buildkit@v1#metadata": { - "source": { - "locations": { - "step0": { - "locations": [ - { - "ranges": [ - { - "start": { - "line": 1 - }, - "end": { - "line": 1 - } - } - ] - } - ] - } - }, - "infos": [ - { - "filename": "Dockerfile", - "data": "RlJPTSBhbHBpbmU6bGF0ZXN0Cg==", - "llbDefinition": [ - { - "id": "step0", - "op": { - "Op": { - "source": { - "identifier": "local://dockerfile", - "attrs": { - "local.differ": "none", - "local.followpaths": "[\"Dockerfile\",\"Dockerfile.dockerignore\",\"dockerfile\"]", - "local.session": 
"q2jnwdkas0i0iu4knchd92jaz", - "local.sharedkeyhint": "dockerfile" - } - } - }, - "constraints": {} - } - }, - { - "id": "step1", - "op": { - "Op": null - }, - "inputs": ["step0:0"] - } - ] - } - ] - }, - "layers": { - "step0:0": [ - [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "digest": "sha256:c158987b05517b6f2c5913f3acef1f2182a32345a304fe357e3ace5fadcad715", - "size": 3370706 - } - ] - ] - } - } - } - } -} -``` diff --git a/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md b/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md index 71061c626868..3565a28a2173 100644 --- a/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md +++ b/_vendor/github.com/moby/buildkit/docs/buildkitd.toml.md @@ -46,6 +46,13 @@ insecure-entitlements = [ "network.host", "security.insecure" ] # OTEL collector trace socket path socketPath = "/run/buildkit/otel-grpc.sock" +[cdi] + # Disables support of the Container Device Interface (CDI). + disabled = true + # List of directories to scan for CDI spec files. For more details about CDI + # specification, please refer to https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md#cdi-json-specification + specDirs = ["/etc/cdi", "/var/run/cdi", "/etc/buildkit/cdi"] + # config for build history API that stores information about completed build commands [history] # maxAge is the maximum age of history entries to keep, in seconds. @@ -169,7 +176,9 @@ insecure-entitlements = [ "network.host", "security.insecure" ] [registry."docker.io"] # mirror configuration to handle path in case a mirror registry requires a /project path rather than just a host:port mirrors = ["yourmirror.local:5000", "core.harbor.domain/proxy.docker.io"] + # Use plain HTTP to connect to the mirrors. http = true + # Use HTTPS with self-signed certificates. Do not enable this together with `http`. insecure = true ca=["/etc/config/myca.pem"] [[registry."docker.io".keypair]] diff --git a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md index 0afa620e2cd2..5b32b7f01e13 100644 --- a/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md +++ b/_vendor/github.com/moby/buildkit/frontend/dockerfile/docs/reference.md @@ -689,7 +689,8 @@ EOF The available `[OPTIONS]` for the `RUN` instruction are: | Option | Minimum Dockerfile version | -| ------------------------------- | -------------------------- | +|---------------------------------|----------------------------| +| [`--device`](#run---device) | 1.14-labs | | [`--mount`](#run---mount) | 1.2 | | [`--network`](#run---network) | 1.3 | | [`--security`](#run---security) | 1.1.2-labs | @@ -707,6 +708,87 @@ guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practi The cache for `RUN` instructions can be invalidated by [`ADD`](#add) and [`COPY`](#copy) instructions. +### RUN --device + +> [!NOTE] +> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) +> version. It also needs BuildKit 0.20.0 or later. + +```dockerfile +RUN --device=name,[required] +``` + +`RUN --device` allows build to request [CDI devices](https://github.com/moby/buildkit/blob/master/docs/cdi.md) +to be available to the build step. + +The device `name` is provided by the CDI specification registered in BuildKit. + +In the following example, multiple devices are registered in the CDI +specification for the `vendor1.com/device` vendor. 
+ +```yaml +cdiVersion: "0.6.0" +kind: "vendor1.com/device" +devices: + - name: foo + containerEdits: + env: + - FOO=injected + - name: bar + annotations: + org.mobyproject.buildkit.device.class: class1 + containerEdits: + env: + - BAR=injected + - name: baz + annotations: + org.mobyproject.buildkit.device.class: class1 + containerEdits: + env: + - BAZ=injected + - name: qux + annotations: + org.mobyproject.buildkit.device.class: class2 + containerEdits: + env: + - QUX=injected +``` + +The device name format is flexible and accepts various patterns to support +multiple device configurations: + +* `vendor1.com/device`: request the first device found for this vendor +* `vendor1.com/device=foo`: request a specific device +* `vendor1.com/device=*`: request all devices for this vendor +* `class1`: request devices by `org.mobyproject.buildkit.device.class` annotation + +#### Example: CUDA-Powered LLaMA Inference + +In this example we use the `--device` flag to run `llama.cpp` inference using +an NVIDIA GPU device through CDI: + +```dockerfile +# syntax=docker/dockerfile:1-labs + +FROM scratch AS model +ADD https://huggingface.co/bartowski/Llama-3.2-1B-Instruct-GGUF/resolve/main/Llama-3.2-1B-Instruct-Q4_K_M.gguf /model.gguf + +FROM scratch AS prompt +COPY < **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +


+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP>[:<port>]`. +


- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml b/_vendor/github.com/moby/moby/docs/api/v1.33.yaml index 9853899d8803..4d49c4156bd4 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.33.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.33.yaml @@ -3333,8 +3333,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3376,8 +3379,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Config: @@ -3881,55 +3885,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +


+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP>[:<port>]`. +


- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml b/_vendor/github.com/moby/moby/docs/api/v1.34.yaml index b91aed4d2ccb..2d0d987fb2ba 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.34.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.34.yaml @@ -3361,8 +3361,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3404,8 +3407,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Config: @@ -3909,55 +3913,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +


+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP>[:<port>]`. +


- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml b/_vendor/github.com/moby/moby/docs/api/v1.35.yaml index ecba0678ab33..e7de26d46d72 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.35.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.35.yaml @@ -3365,8 +3365,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3408,8 +3411,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Config: @@ -3913,55 +3917,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +


+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP>[:<port>]`. +


- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml b/_vendor/github.com/moby/moby/docs/api/v1.36.yaml index c0d4de57d88b..d39839373f83 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.36.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.36.yaml @@ -3378,8 +3378,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3421,8 +3424,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Config: @@ -3926,55 +3930,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml b/_vendor/github.com/moby/moby/docs/api/v1.37.yaml index 6c00a3454898..014086b9d55a 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.37.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.37.yaml @@ -3384,8 +3384,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3434,8 +3437,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -3946,55 +3950,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
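A config differs from a secret in that its `Data` is returned by read endpoints, so clients decode it as well as encode it. A short sketch of the reverse direction, using an illustrative value standing in for a `GET /configs/{id}` response:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Data as it might appear in a GET /configs/{id} response
	// (illustrative value, RFC 4648 section 5 alphabet).
	const data = "c2VydmVyIHsgbGlzdGVuIDgwOyB9"

	raw, err := base64.URLEncoding.DecodeString(data)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", raw) // server { listen 80; }
}
```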
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml b/_vendor/github.com/moby/moby/docs/api/v1.38.yaml index c5aca74b3a1d..23555a87ac48 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.38.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.38.yaml @@ -3438,8 +3438,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -3488,8 +3491,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -4000,55 +4004,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml b/_vendor/github.com/moby/moby/docs/api/v1.39.yaml index f5fff93f9510..2c16eca88604 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.39.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.39.yaml @@ -4499,8 +4499,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4551,8 +4554,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5278,55 +5282,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml b/_vendor/github.com/moby/moby/docs/api/v1.40.yaml index 5e0171b987fd..27414904597d 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.40.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.40.yaml @@ -4623,8 +4623,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4675,8 +4678,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5415,55 +5419,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml b/_vendor/github.com/moby/moby/docs/api/v1.41.yaml index 97fbc83baba8..0554b5a719ed 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.41.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.41.yaml @@ -4872,8 +4872,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4924,8 +4927,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5669,55 +5673,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml b/_vendor/github.com/moby/moby/docs/api/v1.42.yaml index 03521c03bd3c..b31e84af5db0 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.42.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.42.yaml @@ -4891,8 +4891,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4943,8 +4946,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5669,55 +5673,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml b/_vendor/github.com/moby/moby/docs/api/v1.43.yaml index 4043a57996b7..a1cdea0ea5e4 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.43.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.43.yaml @@ -4922,8 +4922,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -4974,8 +4977,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5702,55 +5706,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax diff --git a/_vendor/github.com/moby/moby/docs/api/v1.44.yaml b/_vendor/github.com/moby/moby/docs/api/v1.44.yaml index b26c138b1220..8e4e6121e622 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.44.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.44.yaml @@ -5037,8 +5037,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5089,8 +5092,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5835,55 +5839,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -9671,13 +9647,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. diff --git a/_vendor/github.com/moby/moby/docs/api/v1.45.yaml b/_vendor/github.com/moby/moby/docs/api/v1.45.yaml index 9d9522bf6adc..56d346fea4ce 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.45.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.45.yaml @@ -5023,8 +5023,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5075,8 +5078,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5821,55 +5825,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. 
- - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
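The tarball-format hunk above replaces the legacy per-layer directories with OCI image layout content plus a compatibility `manifest.json`. A quick way to see this in practice is to list the archive entries; a sketch, assuming a local `alpine.tar` produced by `docker save`:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	// Inspect a tarball produced by `docker save` (hypothetical file name).
	f, err := os.Open("alpine.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}
```

Expect `oci-layout`, `index.json`, and content-addressed `blobs/sha256/...` entries alongside the legacy `manifest.json` (and `repositories`, when the tarball defines a repository).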
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -9651,13 +9627,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. diff --git a/_vendor/github.com/moby/moby/docs/api/v1.46.yaml b/_vendor/github.com/moby/moby/docs/api/v1.46.yaml index 38fd49c2b477..cadb3570bec8 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.46.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.46.yaml @@ -1385,7 +1385,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1395,7 +1395,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1409,7 +1409,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1420,7 +1420,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1431,7 +1431,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1458,7 +1458,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1469,7 +1469,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1480,7 +1480,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1517,7 +1517,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1556,7 +1556,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1568,7 +1568,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1602,7 +1602,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -5082,8 +5082,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5134,8 +5137,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5932,55 +5936,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -9785,13 +9761,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. diff --git a/_vendor/github.com/moby/moby/docs/api/v1.47.yaml b/_vendor/github.com/moby/moby/docs/api/v1.47.yaml index bba025b6d7d0..4ece9730cbd5 100644 --- a/_vendor/github.com/moby/moby/docs/api/v1.47.yaml +++ b/_vendor/github.com/moby/moby/docs/api/v1.47.yaml @@ -1385,7 +1385,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: @@ -1395,7 +1395,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" User: @@ -1409,7 +1409,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1420,7 +1420,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1431,7 +1431,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1458,7 +1458,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1469,7 +1469,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1480,7 +1480,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. + > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1517,7 +1517,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. + > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1556,7 +1556,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false @@ -1568,7 +1568,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" @@ -1602,7 +1602,7 @@ definitions:
> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. + > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true @@ -5100,8 +5100,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5152,8 +5155,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | @@ -5514,7 +5518,11 @@ definitions: type: "boolean" example: true BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + + The `br_netfilter` type: "boolean" example: true BridgeNfIp6tables: @@ -5950,55 +5958,27 @@ definitions: List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. +
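The updated `BridgeNfIptables` description above makes the reported value a point-in-time check from daemon startup. The underlying sysctl only exists while the `br_netfilter` kernel module is loaded, so it can be verified directly on the host; a sketch, assuming a Linux machine:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// This file only appears once the br_netfilter module is loaded;
	// the daemon's Info field reflects a snapshot of it at startup.
	b, err := os.ReadFile("/proc/sys/net/bridge/bridge-nf-call-iptables")
	if err != nil {
		fmt.Println("bridge-nf-call-iptables unavailable:", err)
		return
	}
	fmt.Println("bridge-nf-call-iptables =", strings.TrimSpace(string(b)))
}
```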
+ > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["::1/128", "127.0.0.0/8"] + example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `[:]` or `[:]`. - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. +
+          <p><br /></p>
+
- > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. + > **Deprecated**: Pushing nondistributable artifacts is now always enabled + > and this field is always `null`. type: "array" items: type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -9922,13 +9902,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, it includes the `manifest.json` file associated with a backwards-compatible `docker save` format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. diff --git a/_vendor/github.com/moby/moby/docs/api/v1.48.yaml b/_vendor/github.com/moby/moby/docs/api/v1.48.yaml new file mode 100644 index 000000000000..c1441c8dfcbe --- /dev/null +++ b/_vendor/github.com/moby/moby/docs/api/v1.48.yaml @@ -0,0 +1,13536 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.48" +info: + title: "Docker Engine API" + version: "1.48" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break.
To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.48) is used. + For example, calling `/info` is the same as calling `/v1.48/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. 
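The versioning and authentication rules above are easiest to see in a small client. Below is a minimal Go sketch, assuming a daemon on the default Unix socket at `/var/run/docker.sock`; the credentials and registry address are placeholders, and the `X-Registry-Auth` header is only required by endpoints that contact a registry:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	// Route all requests over the daemon's default Unix socket; the HTTP
	// host below is a dummy value that the custom dialer ignores.
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}
	httpc := &http.Client{Transport: tr}

	// Version-prefixed call: /v1.48/info instead of /info.
	resp, err := httpc.Get("http://docker/v1.48/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")

	// X-Registry-Auth value: a base64url-encoded (RFC 4648 section 5)
	// JSON credential object, sent to endpoints that talk to a registry.
	creds, _ := json.Marshal(map[string]string{
		"username":      "jdoe",                 // placeholder
		"password":      "secret",               // placeholder
		"serveraddress": "registry.example.com", // placeholder
	})
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(creds))
}
```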
+ - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `image` a docker image + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). 
+ type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). 
+ + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever-increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created.
If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. 
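In the Go SDK, these limits map onto the `container.Resources` struct, which serializes into the `HostConfig` fields described here. A minimal sketch with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-units"
)

func main() {
	pids := int64(256)
	res := container.Resources{
		Memory:            512 * 1024 * 1024, // hard limit, in bytes (512MB)
		MemoryReservation: 256 * 1024 * 1024, // soft limit, in bytes
		MemorySwap:        -1,                // -1 enables unlimited swap
		NanoCPUs:          1_500_000_000,     // 1.5 CPUs, in units of 1e-9 CPUs
		CpusetCpus:        "0-3",             // allow execution on CPUs 0 through 3
		PidsLimit:         &pids,             // cap the container at 256 PIDs
		Ulimits: []*units.Ulimit{
			{Name: "nofile", Soft: 1024, Hard: 2048},
		},
	}
	fmt.Printf("%+v\n", res)
}
```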
+ type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. 
+ - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. 
+ x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. 
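Most of these host-level settings are supplied through a single `HostConfig` object at container-create time. A minimal sketch using the Go SDK's `container.HostConfig` type; all values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	hc := container.HostConfig{
		PublishAllPorts: true,                             // ephemeral host port per exposed port
		CapDrop:         []string{"NET_RAW"},              // drop a default capability
		CapAdd:          []string{"SYS_TIME"},             // grant an extra one
		DNS:             []string{"10.0.0.53"},            // placeholder resolver
		ExtraHosts:      []string{"db.internal:10.0.0.7"}, // extra /etc/hosts entry
		OomScoreAdj:     500,                              // bias the OOM killer
		ReadonlyRootfs:  true,                             // read-only root filesystem
		RestartPolicy: container.RestartPolicy{
			Name:              "on-failure",
			MaximumRetryCount: 3,
		},
	}
	fmt.Printf("%+v\n", hc)
}
```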
+ ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set. + additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." 
+ type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. 
+ type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + example: "" + Domainname: + description: | + The domain name to use for the container. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + example: "" + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + AttachStdin: + description: | + Whether to attach to `stdin`. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + AttachStdout: + description: | + Whether to attach to `stdout`. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + AttachStderr: + description: | + Whether to attach to `stderr`. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + OpenStdin: + description: | + Open `stdin` + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + StdinOnce: + description: | + Close `stdin` after one attached client disconnects. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + default: "" + example: "" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: | + Disable networking for the container. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + x-nullable: true + MacAddress: + description: | + MAC address of the container. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "string" + default: "" + example: "" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: | + Timeout to stop a container in seconds. + +
+          <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "Hostname": "" + "Domainname": "" + "User": "web:web" + "AttachStdin": false + "AttachStdout": false + "AttachStderr": false + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Tty": false + "OpenStdin": false + "StdinOnce": false + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Image": "" + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. 
+ type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+          <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + DriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + FilesystemChange: + description: | + Change in the container's filesystem. + type: "object" + required: [Path, Kind] + properties: + Path: + description: | + Path to file or directory that has changed. + type: "string" + x-nullable: false + Kind: + $ref: "#/definitions/ChangeType" + + ChangeType: + description: | + Kind of change + + Can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. 
+
+          This identifier is a content-addressable digest calculated from the
+          image's configuration (which includes the digests of layers used by
+          the image).
+
+          Note that this digest differs from the `RepoDigests` below, which
+          holds digests of image manifests that reference the image.
+        type: "string"
+        x-nullable: false
+        example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        x-nullable: true
+        $ref: "#/definitions/OCIDescriptor"
+      Manifests:
+        description: |
+          Manifests is a list of image manifests available in this image. It
+          provides a more detailed view of the platform-specific image manifests or
+          other image-attached data like build attestations.
+
+          Only available if the daemon provides a multi-platform image store
+          and the `manifests` option is set in the inspect request.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      RepoTags:
+        description: |
+          List of image names/tags in the local image cache that reference this
+          image.
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Parent:
+        description: |
+          ID of the parent image.
+
+          Depending on how the image was created, this field may be empty and
+          is only set for images that were built/created locally. This field
+          is empty if the image was pulled from an image registry.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Comment:
+        description: |
+          Optional message that was set when committing or importing the image.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Created:
+        description: |
+          Date and time at which the image was created, formatted in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+          This information is only available if present in the image,
+          and omitted otherwise.
+        type: "string"
+        format: "dateTime"
+        x-nullable: true
+        example: "2022-02-04T21:20:12.497794809Z"
+      DockerVersion:
+        description: |
+          The version of Docker that was used to build the image.
+
+          Depending on how the image was created, this field may be empty.
+ type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
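The `ImageInspect` fields above map directly onto what an API client gets back from the image-inspect endpoint. A hedged sketch with the Python Docker SDK (assuming the `alpine` image can be pulled):

```python
import docker

client = docker.from_env()
image = client.images.pull("alpine", tag="latest")

# image.attrs mirrors the ImageInspect definition above.
print(image.attrs["Id"])           # content-addressable "sha256:..." ID
print(image.attrs["RepoTags"])     # e.g. ["alpine:latest"]
print(image.attrs["RepoDigests"])  # manifest digests, populated by the pull
print(image.attrs["Size"])         # total size in bytes
```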
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Created:
+        description: |
+          Date and time at which the image was created as a Unix timestamp
+          (number of seconds since EPOCH).
+        type: "integer"
+        x-nullable: false
+        example: "1644009612"
+      Size:
+        description: |
+          Total size of the image including all layers it is composed of.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 172064416
+      SharedSize:
+        description: |
+          Total size of image layers that are shared between this image and other
+          images.
+
+          This size is not calculated by default. `-1` indicates that the value
+          has not been set / calculated.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 1239828
+      VirtualSize:
+        description: |-
+          Total size of the image including all layers it is composed of.
+
+          Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+        type: "integer"
+        format: "int64"
+        example: 172064416
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Containers:
+        description: |
+          Number of containers using this image. Includes both stopped and running
+          containers.
+
+          This size is not calculated by default, and depends on which API endpoint
+          is used. `-1` indicates that the value has not been set / calculated.
+        x-nullable: false
+        type: "integer"
+        example: 2
+      Manifests:
+        description: |
+          Manifests is a list of manifests available in this image.
+          It provides a more detailed view of the platform-specific image manifests
+          or other image-attached data like build attestations.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: false
+        x-omitempty: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+ x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." 
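As a rough illustration of how `VolumeCreateOptions` and the resulting `Volume` object relate, a sketch with the Python Docker SDK (the name and label are illustrative):

```python
import docker

client = docker.from_env()

# Fields correspond to VolumeCreateOptions: Name, Driver, Labels.
volume = client.volumes.create(
    name="tardis",
    driver="local",
    labels={"com.example.some-label": "some-value"},
)

# volume.attrs follows the Volume definition above.
print(volume.attrs["Name"])        # "tardis"
print(volume.attrs["Driver"])      # "local"
print(volume.attrs["Mountpoint"])  # host path backing the volume
print(volume.attrs["Scope"])       # "local"

volume.remove()
```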
+        type: "string"
+        default: "local"
+        x-nullable: false
+        example: "custom"
+      DriverOpts:
+        description: |
+          A mapping of driver options and values. These options are
+          passed directly to the driver and are driver specific.
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          device: "tmpfs"
+          o: "size=100m,uid=1000"
+          type: "tmpfs"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      ClusterVolumeSpec:
+        $ref: "#/definitions/ClusterVolumeSpec"
+
+  VolumeListResponse:
+    type: "object"
+    title: "VolumeListResponse"
+    x-go-name: "ListResponse"
+    description: "Volume list response"
+    properties:
+      Volumes:
+        type: "array"
+        description: "List of volumes"
+        items:
+          $ref: "#/definitions/Volume"
+      Warnings:
+        type: "array"
+        description: |
+          Warnings that occurred when fetching the list of volumes.
+        items:
+          type: "string"
+        example: []
+
+  Network:
+    type: "object"
+    properties:
+      Name:
+        description: |
+          Name of the network.
+        type: "string"
+        example: "my_network"
+      Id:
+        description: |
+          ID that uniquely identifies a network on a single machine.
+        type: "string"
+        example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+      Created:
+        description: |
+          Date and time at which the network was created in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+        type: "string"
+        format: "dateTime"
+        example: "2016-10-19T04:33:30.360899459Z"
+      Scope:
+        description: |
+          The level at which the network exists (e.g. `swarm` for cluster-wide
+          or `local` for machine level)
+        type: "string"
+        example: "local"
+      Driver:
+        description: |
+          The name of the driver used to create the network (e.g. `bridge`,
+          `overlay`).
+        type: "string"
+        example: "overlay"
+      EnableIPv4:
+        description: |
+          Whether the network was created with IPv4 enabled.
+        type: "boolean"
+        example: true
+      EnableIPv6:
+        description: |
+          Whether the network was created with IPv6 enabled.
+        type: "boolean"
+        example: false
+      IPAM:
+        $ref: "#/definitions/IPAM"
+      Internal:
+        description: |
+          Whether the network is created to only allow internal networking
+          connectivity.
+        type: "boolean"
+        default: false
+        example: false
+      Attachable:
+        description: |
+          Whether a global / swarm scope network is manually attachable by regular
+          containers from workers in swarm mode.
+        type: "boolean"
+        default: false
+        example: false
+      Ingress:
+        description: |
+          Whether the network is providing the routing-mesh for the swarm cluster.
+        type: "boolean"
+        default: false
+        example: false
+      ConfigFrom:
+        $ref: "#/definitions/ConfigReference"
+      ConfigOnly:
+        description: |
+          Whether the network is a config-only network. Config-only networks are
+          placeholder networks for network configurations to be used by other
+          networks. Config-only networks cannot be used directly to run containers
+          or services.
+        type: "boolean"
+        default: false
+      Containers:
+        description: |
+          Contains endpoints attached to the network.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/NetworkContainer"
+        example:
+          19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+            Name: "test"
+            EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+            MacAddress: "02:42:ac:13:00:02"
+            IPv4Address: "172.19.0.2/16"
+            IPv6Address: ""
+      Options:
+        description: |
+          Network-specific options used when creating the network.
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.docker.network.bridge.default_bridge: "true"
+          com.docker.network.bridge.enable_icc: "true"
+          com.docker.network.bridge.enable_ip_masquerade: "true"
+          com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+          com.docker.network.bridge.name: "docker0"
+          com.docker.network.driver.mtu: "1500"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Peers:
+        description: |
+          List of peer nodes for an overlay network. This field is only present
+          for overlay networks, and omitted for other network types.
+        type: "array"
+        items:
+          $ref: "#/definitions/PeerInfo"
+        x-nullable: true
+      # TODO: Add Services (only present when "verbose" is set).
+
+  ConfigReference:
+    description: |
+      The config-only network source to provide the configuration for
+      this network.
+    type: "object"
+    properties:
+      Network:
+        description: |
+          The name of the config-only network that provides the network's
+          configuration. The specified network must be an existing config-only
+          network. Only network names are allowed, not network IDs.
+        type: "string"
+        example: "config_only_network_01"
+
+  IPAM:
+    type: "object"
+    properties:
+      Driver:
+        description: "Name of the IPAM driver to use."
+        type: "string"
+        default: "default"
+        example: "default"
+      Config:
+        description: |
+          List of IPAM configuration options, specified as a map:
+
+          ```
+          {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/IPAMConfig"
+      Options:
+        description: "Driver-specific options, specified as a map."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          foo: "bar"
+
+  IPAMConfig:
+    type: "object"
+    properties:
+      Subnet:
+        type: "string"
+        example: "172.20.0.0/16"
+      IPRange:
+        type: "string"
+        example: "172.20.10.0/24"
+      Gateway:
+        type: "string"
+        example: "172.20.10.11"
+      AuxiliaryAddresses:
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+  NetworkContainer:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+        example: "container_1"
+      EndpointID:
+        type: "string"
+        example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+      MacAddress:
+        type: "string"
+        example: "02:42:ac:13:00:02"
+      IPv4Address:
+        type: "string"
+        example: "172.19.0.2/16"
+      IPv6Address:
+        type: "string"
+        example: ""
+
+  PeerInfo:
+    description: |
+      PeerInfo represents one peer of an overlay network.
+    type: "object"
+    properties:
+      Name:
+        description:
+          ID of the peer-node in the Swarm cluster.
+        type: "string"
+        example: "6869d7c1732b"
+      IP:
+        description:
+          IP-address of the peer-node in the Swarm cluster.
+        type: "string"
+        example: "10.133.77.91"
+
+  NetworkCreateResponse:
+    description: "OK response to NetworkCreate operation"
+    type: "object"
+    title: "NetworkCreateResponse"
+    x-go-name: "CreateResponse"
+    required: [Id, Warning]
+    properties:
+      Id:
+        description: "The ID of the created network."
+        type: "string"
+        x-nullable: false
+        example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d"
+      Warning:
+        description: "Warnings encountered when creating the network"
+        type: "string"
+        x-nullable: false
+        example: ""
+
+  BuildInfo:
+    type: "object"
+    properties:
+      id:
+        type: "string"
+      stream:
+        type: "string"
+      error:
+        type: "string"
+        x-nullable: true
+        description: |-
+          errors encountered during the operation.
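A sketch of how the `IPAM` and `IPAMConfig` definitions above are supplied when creating a network, again using the Python Docker SDK; the subnet values are the ones from the examples:

```python
import docker

client = docker.from_env()

# One IPAMConfig entry: Subnet, IPRange, Gateway.
pool = docker.types.IPAMPool(
    subnet="172.20.0.0/16",
    iprange="172.20.10.0/24",
    gateway="172.20.10.11",
)
ipam = docker.types.IPAMConfig(driver="default", pool_configs=[pool])

network = client.networks.create("my_network", driver="bridge", ipam=ipam)

print(network.attrs["Id"])    # Id from NetworkCreateResponse
print(network.attrs["IPAM"])  # echoes the pool configuration back

network.remove()
```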
+ + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. 
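`BuildCache` records surface through `GET /system/df`. A hedged sketch of reading them with the Python Docker SDK; the `BuildCache` key is only returned by daemons that support it:

```python
import docker

client = docker.from_env()

# GET /system/df; the response can include Images, Containers,
# Volumes, and BuildCache sections, depending on the daemon.
usage = client.df()
for record in usage.get("BuildCache") or []:
    print(record["ID"], record["Type"], record["InUse"], record["Size"])
```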
+ progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. 
+        type: "string"
+        example: "2001:db8:2::100"
+      GlobalIPv6Address:
+        description: |
+          Global IPv6 address.
+        type: "string"
+        example: "2001:db8::5689"
+      GlobalIPv6PrefixLen:
+        description: |
+          Mask length of the global IPv6 address.
+        type: "integer"
+        format: "int64"
+        example: 64
+      DNSNames:
+        description: |
+          List of all DNS names an endpoint has on a specific network. This
+          list is based on the container name, network aliases, container short
+          ID, and hostname.
+
+          These DNS names are non-fully qualified but can contain several dots.
+          You can get fully qualified DNS names by appending `.<network-name>`.
+          For instance, if container name is `my.ctr` and the network is named
+          `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be
+          `my.ctr.testnet`.
+        type: array
+        items:
+          type: string
+        example: ["foobar", "server_x", "server_y", "my.ctr"]
+
+  EndpointIPAMConfig:
+    description: |
+      EndpointIPAMConfig represents an endpoint's IPAM configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      IPv4Address:
+        type: "string"
+        example: "172.20.30.33"
+      IPv6Address:
+        type: "string"
+        example: "2001:db8:abcd::3033"
+      LinkLocalIPs:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "169.254.34.68"
+          - "fe80::3468"
+
+  PluginMount:
+    type: "object"
+    x-nullable: false
+    required: [Name, Description, Settable, Source, Destination, Type, Options]
+    properties:
+      Name:
+        type: "string"
+        x-nullable: false
+        example: "some-mount"
+      Description:
+        type: "string"
+        x-nullable: false
+        example: "This is a mount that's used by the plugin."
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Source:
+        type: "string"
+        example: "/var/lib/docker/plugins/"
+      Destination:
+        type: "string"
+        x-nullable: false
+        example: "/mnt/state"
+      Type:
+        type: "string"
+        x-nullable: false
+        example: "bind"
+      Options:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "rbind"
+          - "rw"
+
+  PluginDevice:
+    type: "object"
+    required: [Name, Description, Settable, Path]
+    x-nullable: false
+    properties:
+      Name:
+        type: "string"
+        x-nullable: false
+      Description:
+        type: "string"
+        x-nullable: false
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Path:
+        type: "string"
+        example: "/dev/fuse"
+
+  PluginEnv:
+    type: "object"
+    x-nullable: false
+    required: [Name, Description, Settable, Value]
+    properties:
+      Name:
+        x-nullable: false
+        type: "string"
+      Description:
+        x-nullable: false
+        type: "string"
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Value:
+        type: "string"
+
+  PluginInterfaceType:
+    type: "object"
+    x-nullable: false
+    required: [Prefix, Capability, Version]
+    properties:
+      Prefix:
+        type: "string"
+        x-nullable: false
+      Capability:
+        type: "string"
+        x-nullable: false
+      Version:
+        type: "string"
+        x-nullable: false
+
+  PluginPrivilege:
+    description: |
+      Describes a permission the user has to accept upon installing
+      the plugin.
+    type: "object"
+    x-go-name: "PluginPrivilege"
+    properties:
+      Name:
+        type: "string"
+        example: "network"
+      Description:
+        type: "string"
+      Value:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "host"
+
+  Plugin:
+    description: "A plugin for the Engine API"
+    type: "object"
+    required: [Settings, Enabled, Config, Name]
+    properties:
+      Id:
+        type: "string"
+        example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+      Name:
+        type: "string"
+        x-nullable: false
+        example: "tiborvass/sample-volume-plugin"
+      Enabled:
+        description:
+          True if the plugin is running. False if the plugin is not running,
+          only installed.
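The `EndpointSettings` and `DNSNames` behavior described above can be observed by connecting a container to a user-defined network. A sketch with the Python Docker SDK (network and alias names are illustrative, and `DNSNames` only appears on newer daemons):

```python
import docker

client = docker.from_env()

network = client.networks.create("testnet", driver="bridge")
container = client.containers.run("alpine", "sleep 300", detach=True)

# Aliases feed the endpoint's DNSNames list described above.
network.connect(container, aliases=["server_x"])

container.reload()
endpoint = container.attrs["NetworkSettings"]["Networks"]["testnet"]
print(endpoint["IPAddress"], endpoint["Gateway"])
print(endpoint.get("DNSNames"))  # e.g. [..., "server_x"] on API >= 1.44

container.remove(force=True)
network.remove()
```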
+ type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. 
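A short sketch of reading this swarm information with the Python Docker SDK; it assumes the daemon is a swarm manager, since `GET /swarm` and `GET /nodes` are manager-only:

```python
import docker

client = docker.from_env()

# ClusterInfo plus JoinTokens (GET /swarm).
swarm = client.swarm.attrs
print(swarm["ID"], swarm["Spec"]["Name"])
print(swarm["Spec"]["Raft"]["ElectionTick"])

# Node, NodeStatus, and ManagerStatus (GET /nodes).
for node in client.nodes.list():
    desc = node.attrs["Description"]
    print(desc["Hostname"], node.attrs["Status"]["State"])
```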
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. 
+                    The secret in the reference will be identified by its ID.
+                  type: "string"
+          OomScoreAdj:
+            type: "integer"
+            format: "int64"
+            description: |
+              An integer value containing the score given to the container in
+              order to tune OOM killer preferences.
+            example: 0
+          Configs:
+            description: |
+              Configs contains references to zero or more configs that will be
+              exposed to the service.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                File:
+                  description: |
+                    File represents a specific target that is backed by a file.
+
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive.
+                  type: "object"
+                  properties:
+                    Name:
+                      description: |
+                        Name represents the final filename in the filesystem.
+                      type: "string"
+                    UID:
+                      description: "UID represents the file UID."
+                      type: "string"
+                    GID:
+                      description: "GID represents the file GID."
+                      type: "string"
+                    Mode:
+                      description: "Mode represents the FileMode of the file."
+                      type: "integer"
+                      format: "uint32"
+                Runtime:
+                  description: |
+                    Runtime represents a target that is not mounted into the
+                    container but is used by the task.
+


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive. + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as are + supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resource limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resource reservations." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Window is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + Label descriptor, such as `engine.labels.az`.
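+      # Illustrative sketch (not part of the schema): a Placement combining
+      # constraints with a spread preference, as it might appear inside the
+      # TaskTemplate of a `POST /services/create` request body. The label
+      # names used here are hypothetical.
+      #
+      #   "Placement": {
+      #     "Constraints": ["node.role==worker", "node.labels.type==production"],
+      #     "Preferences": [
+      #       {"Spread": {"SpreadDescriptor": "node.labels.datacenter"}}
+      #     ]
+      #   }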
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. 
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to count the + number of executions of a job. + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
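+      # Illustrative sketch: this object is returned by `GET /containers/{id}/json`.
+      # The container name, socket path, and API-version prefix below are
+      # installation-dependent examples.
+      #
+      #   curl --unix-socket /var/run/docker.sock \
+      #     http://localhost/v1.47/containers/funny_chatelet/json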
+ type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. 
+ type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. 
+ Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). 
+ + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
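+      # Illustrative sketch (refers to ContainerStatsResponse above): a single
+      # sample can be requested with `stream=false`; `one-shot=true` additionally
+      # skips collecting the priming "pre" sample. Paths and version prefix are
+      # installation-dependent.
+      #
+      #   curl --unix-socket /var/run/docker.sock \
+      #     'http://localhost/v1.47/containers/funny_chatelet/stats?stream=false&one-shot=true'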
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and for Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + user mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit.
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates if IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. +


+
+            > **Deprecated**: netfilter module is now loaded on-demand and no longer
+            > during daemon startup, making this field obsolete. This field is always
+            > `false` and will be removed in API v1.49.
+          type: "boolean"
+          example: false
+        BridgeNfIp6tables:
+          description: |
+            Indicates if `bridge-nf-call-ip6tables` is available on the host.
+
+
+            > **Deprecated**: netfilter module is now loaded on-demand, and no longer
+            > during daemon startup, making this field obsolete. This field is always
+            > `false` and will be removed in API v1.49.
+          type: "boolean"
+          example: false
+        Debug:
+          description: |
+            Indicates if the daemon is running in debug-mode / with debug-level
+            logging enabled.
+          type: "boolean"
+          example: true
+        NFd:
+          description: |
+            The total number of file descriptors in use by the daemon process.
+
+            This information is only returned if debug-mode is enabled.
+          type: "integer"
+          example: 64
+        NGoroutines:
+          description: |
+            The number of goroutines that currently exist.
+
+            This information is only returned if debug-mode is enabled.
+          type: "integer"
+          example: 174
+        SystemTime:
+          description: |
+            Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+            format with nanosecond precision.
+          type: "string"
+          example: "2017-08-08T20:28:29.06202363Z"
+        LoggingDriver:
+          description: |
+            The logging driver to use as a default for new containers.
+          type: "string"
+        CgroupDriver:
+          description: |
+            The driver to use for managing cgroups.
+          type: "string"
+          enum: ["cgroupfs", "systemd", "none"]
+          default: "cgroupfs"
+          example: "cgroupfs"
+        CgroupVersion:
+          description: |
+            The version of the cgroup.
+          type: "string"
+          enum: ["1", "2"]
+          default: "1"
+          example: "1"
+        NEventsListener:
+          description: "Number of event listeners subscribed."
+          type: "integer"
+          example: 30
+        KernelVersion:
+          description: |
+            Kernel version of the host.
+
+            On Linux, this information is obtained from `uname`. On Windows, this
+            information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\
+            registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
+          type: "string"
+          example: "6.8.0-31-generic"
+        OperatingSystem:
+          description: |
+            Name of the host's operating system, for example: "Ubuntu 24.04 LTS"
+            or "Windows Server 2016 Datacenter".
+          type: "string"
+          example: "Ubuntu 24.04 LTS"
+        OSVersion:
+          description: |
+            Version of the host's operating system.
+


+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+
+            > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
+            > set through the daemon configuration, and _node_ labels, set from a
+            > manager node in the Swarm. Node labels are not included in this
+            > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
+            > on a manager node in the Swarm.
+          type: "array"
+          items:
+            type: "string"
+          example: ["storage=ssd", "production"]
+        ExperimentalBuild:
+          description: |
+            Indicates if experimental features are enabled on the daemon.
+          type: "boolean"
+          example: true
+        ServerVersion:
+          description: |
+            Version string of the daemon.
+          type: "string"
+          example: "27.0.1"
+        Runtimes:
+          description: |
+            List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
+            runtimes configured on the daemon. Keys hold the "name" used to
+            reference the runtime.
+
+            The Docker daemon relies on an OCI compliant runtime (invoked via the
+            `containerd` daemon) as its interface to the Linux kernel namespaces,
+            cgroups, and SELinux.
+
+            The default runtime is `runc`, which is configured automatically.
+            Additional runtimes can be configured by the user and will be listed
+            here.
+          type: "object"
+          additionalProperties:
+            $ref: "#/definitions/Runtime"
+          default:
+            runc:
+              path: "runc"
+          example:
+            runc:
+              path: "runc"
+            runc-master:
+              path: "/go/bin/runc"
+            custom:
+              path: "/usr/local/bin/my-oci-runtime"
+              runtimeArgs: ["--debug", "--systemd-cgroup=false"]
+        DefaultRuntime:
+          description: |
+            Name of the default OCI runtime that is used when starting containers.
+
+            The default can be overridden per-container at create time.
+          type: "string"
+          default: "runc"
+          example: "runc"
+        Swarm:
+          $ref: "#/definitions/SwarmInfo"
+        LiveRestoreEnabled:
+          description: |
+            Indicates if live restore is enabled.
+
+            If enabled, containers are kept running when the daemon is shut down,
+            or upon daemon start if running containers are detected.
+          type: "boolean"
+          default: false
+          example: false
+        Isolation:
+          description: |
+            Represents the isolation technology to use as a default for containers.
+            The supported values are platform-specific.
+
+            If no isolation value is specified on daemon start, on Windows client,
+            the default is `hyperv`, and on Windows server, the default is `process`.
+
+            This option is currently not used on other platforms.
+          default: "default"
+          type: "string"
+          enum:
+            - "default"
+            - "hyperv"
+            - "process"
+            - ""
+        InitBinary:
+          description: |
+            Name and, optionally, path of the `docker-init` binary.
+
+            If the path is omitted, the daemon searches the host's `$PATH` for the
+            binary and uses the first result.
+          type: "string"
+          example: "docker-init"
+        ContainerdCommit:
+          $ref: "#/definitions/Commit"
+        RuncCommit:
+          $ref: "#/definitions/Commit"
+        InitCommit:
+          $ref: "#/definitions/Commit"
+        SecurityOptions:
+          description: |
+            List of security features that are enabled on the daemon, such as
+            apparmor, seccomp, SELinux, user-namespaces (userns), rootless, and
+            no-new-privileges.
+
+            Additional configuration options for each security feature might
+            be present, and are included as a comma-separated list of key/value
+            pairs.
+          type: "array"
+          items:
+            type: "string"
+          example:
+            - "name=apparmor"
+            - "name=seccomp,profile=default"
+            - "name=selinux"
+            - "name=userns"
+            - "name=rootless"
+        ProductLicense:
+          description: |
+            Reports a summary of the product license on the daemon.
+
+            If a commercial license has been applied to the daemon, information
+            such as the number of nodes and expiration date is included.
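Because `Runtimes` above uses `additionalProperties`, it decodes naturally into a map keyed by runtime name. A minimal sketch, with the `Runtime` struct reduced to the fields defined in this spec and the input trimmed from the schema's own example:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the Runtime definition: path plus optional runtimeArgs.
type Runtime struct {
	Path        string   `json:"path"`
	RuntimeArgs []string `json:"runtimeArgs,omitempty"`
}

func main() {
	raw := `{
	  "runc": {"path": "runc"},
	  "custom": {"path": "/usr/local/bin/my-oci-runtime",
	             "runtimeArgs": ["--debug", "--systemd-cgroup=false"]}
	}`
	runtimes := map[string]Runtime{} // key is the runtime's reference name
	if err := json.Unmarshal([]byte(raw), &runtimes); err != nil {
		panic(err)
	}
	for name, rt := range runtimes {
		fmt.Println(name, "->", rt.Path, rt.RuntimeArgs)
	}
}
```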
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+
+      > **Note**: Only unmanaged (V1) plugins are included in this list.
+      > V1 plugins are "lazily" loaded, and are not returned in this list
+      > if there is no resource using the plugin.
+    type: "object"
+    properties:
+      Volume:
+        description: "Names of available volume-drivers, and volume-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["local"]
+      Network:
+        description: "Names of available network-drivers, and network-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+      Authorization:
+        description: "Names of available authorization plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["img-authz-plugin", "hbm"]
+      Log:
+        description: "Names of available logging-drivers, and logging-driver plugins."
+        type: "array"
+        items:
+          type: "string"
+        example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"]
+
+  RegistryServiceConfig:
+    description: |
+      RegistryServiceConfig stores daemon registry services configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      AllowNondistributableArtifactsCIDRs:
+        description: |
+          List of IP ranges to which nondistributable artifacts can be pushed,
+          using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
+
+
+          > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+          > and this field is always `null`. This field will be removed in API v1.49.
+        type: "array"
+        items:
+          type: "string"
+        example: []
+      AllowNondistributableArtifactsHostnames:
+        description: |
+          List of registry hostnames to which nondistributable artifacts can be
+          pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
+
+
+          > **Deprecated**: Pushing nondistributable artifacts is now always enabled
+          > and this field is always `null`. This field will be removed in API v1.49.
+        type: "array"
+        items:
+          type: "string"
+        example: []
+      InsecureRegistryCIDRs:
+        description: |
+          List of IP ranges of insecure registries, using the CIDR syntax
+          ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+          accept unencrypted (HTTP) or untrusted (HTTPS with certificates
+          from unknown CAs) communication.
+
+          By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as
+          insecure. All other registries are secure. Communicating with an
+          insecure registry is not possible if the daemon assumes that registry
+          is secure.
+
+          This configuration overrides that behavior, allowing insecure
+          communication with registries whose resolved IP address is within the
+          subnet described by the CIDR syntax.
+
+          Registries can also be marked insecure by hostname. Those registries
+          are listed under `IndexConfigs` and have their `Secure` field set to
+          `false`.
+
+          > **Warning**: Using this option can be useful when running a local
+          > registry, but introduces security vulnerabilities. This option
+          > should therefore ONLY be used for testing purposes. For increased
+          > security, users should add their CA to their system's list of trusted
+          > CAs instead of enabling this option.
+        type: "array"
+        items:
+          type: "string"
+        example: ["::1/128", "127.0.0.0/8"]
+      IndexConfigs:
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/IndexInfo"
+        example:
+          "127.0.0.1:5000":
+            "Name": "127.0.0.1:5000"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "[2001:db8:a0b:12f0::1]:80":
+            "Name": "[2001:db8:a0b:12f0::1]:80"
+            "Mirrors": []
+            "Secure": false
+            "Official": false
+          "docker.io":
+            Name: "docker.io"
+            Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+            Secure: true
+            Official: true
+          "registry.internal.corp.example.com:3000":
+            Name: "registry.internal.corp.example.com:3000"
+            Mirrors: []
+            Secure: false
+            Official: false
+      Mirrors:
+        description: |
+          List of registry URLs that act as a mirror for the official
+          (`docker.io`) registry.
+
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://[2001:db8:a0b:12f0::1]/"
+
+  IndexInfo:
+    description:
+      IndexInfo contains information about a registry.
+    type: "object"
+    x-nullable: true
+    properties:
+      Name:
+        description: |
+          Name of the registry, such as "docker.io".
+        type: "string"
+        example: "docker.io"
+      Mirrors:
+        description: |
+          List of mirrors, expressed as URIs.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "https://hub-mirror.corp.example.com:5000/"
+          - "https://registry-2.docker.io/"
+          - "https://registry-3.docker.io/"
+      Secure:
+        description: |
+          Indicates if the registry is secure (that is, not part of the list of
+          insecure registries).
+
+          If `false`, the registry is insecure. Insecure registries accept
+          unencrypted (HTTP) or untrusted (HTTPS with certificates from
+          unknown CAs) communication.
+
+          > **Warning**: Insecure registries can be useful when running a local
+          > registry. However, because their use creates security vulnerabilities,
+          > they should ONLY be enabled for testing purposes. For increased
+          > security, users should add their CA to their system's list of
+          > trusted CAs instead of enabling this option.
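A brief, illustrative Go sketch of the matching the `InsecureRegistryCIDRs` description implies: a registry's resolved address is treated as insecure when it falls inside any configured prefix. The helper is hypothetical, not daemon code, and uses the default ranges shown above:

```go
package main

import (
	"fmt"
	"net/netip"
)

// insecureByCIDR reports whether a resolved registry address falls inside
// any of the configured insecure ranges.
func insecureByCIDR(addr netip.Addr, cidrs []string) bool {
	for _, c := range cidrs {
		if p, err := netip.ParsePrefix(c); err == nil && p.Contains(addr) {
			return true
		}
	}
	return false
}

func main() {
	defaults := []string{"::1/128", "127.0.0.0/8"}
	fmt.Println(insecureByCIDR(netip.MustParseAddr("127.0.0.1"), defaults)) // true
	fmt.Println(insecureByCIDR(netip.MustParseAddr("10.0.0.5"), defaults))  // false
}
```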
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + + **Deprecated**: This field is deprecated and will be omitted in a API v1.49. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. 
+ type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. 
+ type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. 
+ * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. 
+      AccessibilityRequirements:
+        type: "object"
+        description: |
+          Requirements for the accessible topology of the volume. These
+          fields are optional. For an in-depth description of what these
+          fields mean, see the CSI specification.
+        properties:
+          Requisite:
+            type: "array"
+            description: |
+              A list of required topologies, at least one of which the
+              volume must be accessible from.
+            items:
+              $ref: "#/definitions/Topology"
+          Preferred:
+            type: "array"
+            description: |
+              A list of topologies that the volume should attempt to be
+              provisioned in.
+            items:
+              $ref: "#/definitions/Topology"
+      CapacityRange:
+        type: "object"
+        description: |
+          The desired capacity that the volume should be created with. If
+          empty, the plugin will decide the capacity.
+        properties:
+          RequiredBytes:
+            type: "integer"
+            format: "int64"
+            description: |
+              The volume must be at least this big. The value of 0
+              indicates an unspecified minimum.
+          LimitBytes:
+            type: "integer"
+            format: "int64"
+            description: |
+              The volume must not be bigger than this. The value of 0
+              indicates an unspecified maximum.
+      Availability:
+        type: "string"
+        description: |
+          The availability of the volume for use in tasks.
+          - `active` The volume is fully available for scheduling on the cluster.
+          - `pause` No new workloads should use the volume, but existing workloads are not stopped.
+          - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started.
+        default: "active"
+        x-nullable: false
+        enum:
+          - "active"
+          - "pause"
+          - "drain"
+
+  Topology:
+    description: |
+      A map of topological domains to topological segments. For in-depth
+      details, see the documentation for the Topology object in the CSI
+      specification.
+    type: "object"
+    additionalProperties:
+      type: "string"
+
+  ImageManifestSummary:
+    x-go-name: "ManifestSummary"
+    description: |
+      ImageManifestSummary represents a summary of an image manifest.
+    type: "object"
+    required: ["ID", "Descriptor", "Available", "Size", "Kind"]
+    properties:
+      ID:
+        description: |
+          ID is the content-addressable ID of an image and is the same as the
+          digest of the image manifest.
+        type: "string"
+        example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+      Descriptor:
+        $ref: "#/definitions/OCIDescriptor"
+      Available:
+        description: Indicates whether all the child content (image config, layers) is fully available locally.
+        type: "boolean"
+        example: true
+      Size:
+        type: "object"
+        x-nullable: false
+        required: ["Content", "Total"]
+        properties:
+          Total:
+            type: "integer"
+            format: "int64"
+            example: 8213251
+            description: |
+              Total is the total size (in bytes) of all the locally present
+              data (both distributable and non-distributable) that's related to
+              this manifest and its children.
+              This is equal to the sum of the [Content] size and all the sizes in
+              the [Size] struct present in the Kind-specific data struct.
+              For example, for an image kind (Kind == "image")
+              this would include the size of the image content and unpacked
+              image snapshots ([Size.Content] + [ImageData.Size.Unpacked]).
+          Content:
+            description: |
+              Content is the size (in bytes) of all the locally present
+              content in the content store (for example, image config and layers)
+              referenced by this manifest and its children.
+              This only includes blobs in the content store.
+            type: "integer"
+            format: "int64"
+            example: 3987495
+      Kind:
+        type: "string"
+        example: "image"
+        enum:
+          - "image"
+          - "attestation"
+          - "unknown"
+        description: |
+          The kind of the manifest.
+
+          kind         | description
+          -------------|-----------------------------------------------------------
+          image        | Image manifest that can be used to start a container.
+          attestation  | Attestation manifest produced by the BuildKit builder for a specific image manifest.
+      ImageData:
+        description: |
+          The image data for the image manifest.
+          This field is only populated when Kind is "image".
+        type: "object"
+        x-nullable: true
+        x-omitempty: true
+        required: ["Platform", "Containers", "Size", "UnpackedSize"]
+        properties:
+          Platform:
+            $ref: "#/definitions/OCIPlatform"
+            description: |
+              OCI platform of the image. This will be the platform specified in the
+              manifest descriptor from the index/manifest list.
+              If it's not available, it will be obtained from the image config.
+          Containers:
+            description: |
+              The IDs of the containers that are using this image.
+            type: "array"
+            items:
+              type: "string"
+            example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"]
+          Size:
+            type: "object"
+            x-nullable: false
+            required: ["Unpacked"]
+            properties:
+              Unpacked:
+                type: "integer"
+                format: "int64"
+                example: 3987495
+                description: |
+                  Unpacked is the size (in bytes) of the locally unpacked
+                  (uncompressed) image content that's directly usable by the containers
+                  running this image.
+                  It's independent of the distributable content: for example,
+                  the image might still have unpacked data that's still used by
+                  some container even when the distributable (compressed) content is
+                  already gone.
+      AttestationData:
+        description: |
+          The image data for the attestation manifest.
+          This field is only populated when Kind is "attestation".
+        type: "object"
+        x-nullable: true
+        x-omitempty: true
+        required: ["For"]
+        properties:
+          For:
+            description: |
+              The digest of the image manifest that this attestation is for.
+            type: "string"
+            example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f"
+
+paths:
+  /containers/json:
+    get:
+      summary: "List containers"
+      description: |
+        Returns a list of containers. For details on the format, see the
+        [inspect endpoint](#operation/ContainerInspect).
+
+        Note that it uses a different, smaller representation of a container
+        than inspecting a single container. For example, the list of linked
+        containers is not propagated.
+      operationId: "ContainerList"
+      produces:
+        - "application/json"
+      parameters:
+        - name: "all"
+          in: "query"
+          description: |
+            Return all containers. By default, only running containers are shown.
+          type: "boolean"
+          default: false
+        - name: "limit"
+          in: "query"
+          description: |
+            Return this number of most recently created containers, including
+            non-running ones.
+          type: "integer"
+        - name: "size"
+          in: "query"
+          description: |
+            Return the size of container as fields `SizeRw` and `SizeRootFs`.
+          type: "boolean"
+          default: false
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the container list, encoded as JSON (a
+            `map[string][]string`). For example, `{"status": ["paused"]}` will
+            only return paused containers.
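Building that `filters` value from Go looks roughly like the sketch below: marshal the `map[string][]string`, then let `url.Values` handle the percent-encoding. The transport setup is omitted and the values are illustrative; the full set of supported filters follows below.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Filter name to accepted values, as described above.
	filters := map[string][]string{
		"status": {"paused"},
		"label":  {"com.example.vendor=Acme"},
	}
	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Set("all", "true")
	q.Set("filters", string(raw)) // url.Values percent-encodes the JSON
	fmt.Println("/containers/json?" + q.Encode())
}
```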
+
+            Available filters:
+
+            - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+            - `before`=(`<container id>` or `<container name>`)
+            - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `exited=<int>` containers with exit code of `<int>`
+            - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+            - `id=<ID>` a container's ID
+            - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+            - `is-task=`(`true`|`false`)
+            - `label=key` or `label="key=value"` of a container label
+            - `name=<name>` a container's name
+            - `network`=(`<network id>` or `<network name>`)
+            - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+            - `since`=(`<container id>` or `<container name>`)
+            - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+            - `volume`=(`<volume name>` or `<mount point destination>`)
+          type: "string"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            type: "array"
+            items:
+              $ref: "#/definitions/ContainerSummary"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Container"]
+  /containers/create:
+    post:
+      summary: "Create a container"
+      operationId: "ContainerCreate"
+      consumes:
+        - "application/json"
+        - "application/octet-stream"
+      produces:
+        - "application/json"
+      parameters:
+        - name: "name"
+          in: "query"
+          description: |
+            Assign the specified name to the container. Must match
+            `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
+          type: "string"
+          pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
+        - name: "platform"
+          in: "query"
+          description: |
+            Platform in the format `os[/arch[/variant]]` used for image lookup.
+
+            When specified, the daemon checks if the requested image is present
+            in the local image cache with the given OS and Architecture, and
+            otherwise returns a `404` status.
+
+            If the option is not set, the host's native OS and Architecture are
+            used to look up the image in the image cache. However, if no platform
+            is passed and the given image does exist in the local image cache,
+            but its OS or architecture does not match, the container is created
+            with the available image, and a warning is added to the `Warnings`
+            field in the response, for example:
+
+                WARNING: The requested image's platform (linux/arm64/v8) does not
+                         match the detected host platform (linux/amd64) and no
+                         specific platform was requested
+
+          type: "string"
+          default: ""
+        - name: "body"
+          in: "body"
+          description: "Container to create"
+          schema:
+            allOf:
+              - $ref: "#/definitions/ContainerConfig"
+              - type: "object"
+                properties:
+                  HostConfig:
+                    $ref: "#/definitions/HostConfig"
+                  NetworkingConfig:
+                    $ref: "#/definitions/NetworkingConfig"
+            example:
+              Hostname: ""
+              Domainname: ""
+              User: ""
+              AttachStdin: false
+              AttachStdout: true
+              AttachStderr: true
+              Tty: false
+              OpenStdin: false
+              StdinOnce: false
+              Env:
+                - "FOO=bar"
+                - "BAZ=quux"
+              Cmd:
+                - "date"
+              Entrypoint: ""
+              Image: "ubuntu"
+              Labels:
+                com.example.vendor: "Acme"
+                com.example.license: "GPL"
+                com.example.version: "1.0"
+              Volumes:
+                /volumes/data: {}
+              WorkingDir: ""
+              NetworkDisabled: false
+              MacAddress: "12:34:56:78:9a:bc"
+              ExposedPorts:
+                22/tcp: {}
+              StopSignal: "SIGTERM"
+              StopTimeout: 10
+              HostConfig:
+                Binds:
+                  - "/tmp:/tmp"
+                Links:
+                  - "redis3:redis"
+                Memory: 0
+                MemorySwap: 0
+                MemoryReservation: 0
+                NanoCpus: 500000
+                CpuPercent: 80
+                CpuShares: 512
+                CpuPeriod: 100000
+                CpuRealtimePeriod: 1000000
+                CpuRealtimeRuntime: 10000
+                CpuQuota: 50000
+                CpusetCpus: "0,1"
+                CpusetMems: "0,1"
+                MaximumIOps: 0
+                MaximumIOBps: 0
+                BlkioWeight: 300
+                BlkioWeightDevice:
+                  - {}
+                BlkioDeviceReadBps:
+                  - {}
+                BlkioDeviceReadIOps:
+                  - {}
+                BlkioDeviceWriteBps:
+                  - {}
+                BlkioDeviceWriteIOps:
+                  - {}
+                DeviceRequests:
+                  - Driver: "nvidia"
+                    Count: -1
+                    DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
+                    Capabilities: [["gpu", "nvidia", "compute"]]
+                    Options:
+                      property1: "string"
+                      property2: "string"
+                MemorySwappiness: 60
+                OomKillDisable: false
+                OomScoreAdj: 500
+                PidMode: ""
+                PidsLimit: 0
+                PortBindings:
+                  22/tcp:
+                    - HostPort: "11022"
+                PublishAllPorts: false
+                Privileged: false
+                ReadonlyRootfs: false
+                Dns:
+                  - "8.8.8.8"
+                DnsOptions:
+                  - ""
+                DnsSearch:
+                  - ""
+                VolumesFrom:
+                  - "parent"
+                  - "other:ro"
+                CapAdd:
+                  - "NET_ADMIN"
+                CapDrop:
+                  - "MKNOD"
+                GroupAdd:
+                  - "newgroup"
+                RestartPolicy:
+                  Name: ""
+                  MaximumRetryCount: 0
+                AutoRemove: true
+                NetworkMode: "bridge"
+                Devices: []
+                Ulimits:
+                  - {}
+                LogConfig:
+                  Type: "json-file"
+                  Config: {}
+                SecurityOpt: []
+                StorageOpt: {}
+                CgroupParent: ""
+                VolumeDriver: ""
+                ShmSize: 67108864
+              NetworkingConfig:
+                EndpointsConfig:
+                  isolated_nw:
+                    IPAMConfig:
+                      IPv4Address: "172.20.30.33"
+                      IPv6Address: "2001:db8:abcd::3033"
+                      LinkLocalIPs:
+                        - "169.254.34.68"
+                        - "fe80::3468"
+                    Links:
+                      - "container_1"
+                      - "container_2"
+                    Aliases:
+                      - "server_x"
+                      - "server_y"
+                  database_nw: {}
+
+          required: true
+      responses:
+        201:
+          description: "Container created successfully"
+          schema:
+            $ref: "#/definitions/ContainerCreateResponse"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such image"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such image: c2ada9df5af8"
+        409:
+          description: "conflict"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Container"]
+  /containers/{id}/json:
+    get:
+      summary: "Inspect a container"
+      description: "Return low-level information about a container."
+      operationId: "ContainerInspect"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/ContainerInspectResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "size"
+          in: "query"
+          type: "boolean"
+          default: false
+          description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
+      tags: ["Container"]
+  /containers/{id}/top:
+    get:
+      summary: "List processes running inside a container"
+      description: |
+        On Unix systems, this is done by running the `ps` command. This endpoint
+        is not supported on Windows.
+      operationId: "ContainerTop"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/ContainerTopResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "ps_args"
+          in: "query"
+          description: "The arguments to pass to `ps`.
For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
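The numeric `Kind` values in the changes response above map onto the letters `docker diff` prints. A trivial, illustrative Go mapping, using only values stated in the endpoint description:

```go
package main

import "fmt"

// kindLetter translates a FilesystemChange Kind (see the changes endpoint
// above) into the letter shown by `docker diff`.
func kindLetter(kind int) string {
	switch kind {
	case 0:
		return "C" // modified
	case 1:
		return "A" // added
	case 2:
		return "D" // deleted
	}
	return "?"
}

func main() {
	for _, c := range []struct {
		Path string
		Kind int
	}{{"/dev", 0}, {"/dev/kmsg", 1}, {"/test", 1}} {
		fmt.Println(kindLetter(c.Kind), c.Path)
	}
}
```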
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." 
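Before moving on: the CPU formulas in the stats description above translate directly into code. A hedged Go sketch of the CPU-percentage calculation, with the stats struct trimmed to just the fields the formula names (the guard values are an assumption, not specified behavior):

```go
package main

import "fmt"

// Trimmed view of the stats response: only what the CPU formula uses.
type cpuStats struct {
	CPUUsage struct {
		TotalUsage  uint64   `json:"total_usage"`
		PercpuUsage []uint64 `json:"percpu_usage"`
	} `json:"cpu_usage"`
	SystemCPUUsage uint64 `json:"system_cpu_usage"`
	OnlineCPUs     uint32 `json:"online_cpus"`
}

// cpuPercent applies the formula from the endpoint description:
// (cpu_delta / system_cpu_delta) * number_cpus * 100.
func cpuPercent(pre, cur cpuStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage - pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemCPUUsage - pre.SystemCPUUsage)
	ncpus := float64(cur.OnlineCPUs)
	if ncpus == 0 { // fall back to percpu_usage length for older daemons
		ncpus = float64(len(cur.CPUUsage.PercpuUsage))
	}
	if sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * ncpus * 100.0
}

func main() {
	var pre, cur cpuStats
	pre.CPUUsage.TotalUsage, pre.SystemCPUUsage = 100_000, 1_000_000
	cur.CPUUsage.TotalUsage, cur.SystemCPUUsage = 150_000, 1_500_000
	cur.OnlineCPUs = 4
	fmt.Printf("%.1f%%\n", cpuPercent(pre, cur)) // 40.0%
}
```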
+      operationId: "ContainerResize"
+      consumes:
+        - "application/octet-stream"
+      produces:
+        - "text/plain"
+      responses:
+        200:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "cannot resize container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "h"
+          in: "query"
+          required: true
+          description: "Height of the TTY session in characters"
+          type: "integer"
+        - name: "w"
+          in: "query"
+          required: true
+          description: "Width of the TTY session in characters"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/start:
+    post:
+      summary: "Start a container"
+      operationId: "ContainerStart"
+      responses:
+        204:
+          description: "no error"
+        304:
+          description: "container already started"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "detachKeys"
+          in: "query"
+          description: |
+            Override the key sequence for detaching a container. Format is a
+            single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
+            of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+          type: "string"
+      tags: ["Container"]
+  /containers/{id}/stop:
+    post:
+      summary: "Stop a container"
+      operationId: "ContainerStop"
+      responses:
+        204:
+          description: "no error"
+        304:
+          description: "container already stopped"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (for example, `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/restart:
+    post:
+      summary: "Restart a container"
+      operationId: "ContainerRestart"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID or name of the container"
+          type: "string"
+        - name: "signal"
+          in: "query"
+          description: |
+            Signal to send to the container as an integer or string (for example, `SIGINT`).
+          type: "string"
+        - name: "t"
+          in: "query"
+          description: "Number of seconds to wait before killing the container"
+          type: "integer"
+      tags: ["Container"]
+  /containers/{id}/kill:
+    post:
+      summary: "Kill a container"
+      description: |
+        Send a POSIX signal to a container, defaulting to killing the
+        container.
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. 
+
+ Following the header is the payload, which is the specified number of
+ bytes of `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
+
+ operationId: "ContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/vnd.docker.multiplexed-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,` or `_`.
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful for attaching to a container that has started and you
+ want to output everything since the container started.
+
+ If `stream` is also enabled, once all the previous output has been
+ returned, it will seamlessly transition into streaming current
+ output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: |
+ Stream attached streams from the time the request was made onwards.
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "ContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,`, or `_`.
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. 
+ type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + - `until=` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
+ + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker
+ uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
+ instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
+ passing secret values.
+
+
+ For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
+ query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+
+
+ [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
+ type: "string"
+ - name: "shmsize"
+ in: "query"
+ description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
+ type: "integer"
+ - name: "squash"
+ in: "query"
+ description: "Squash the resulting image's layers into a single layer. *(Experimental release only.)*"
+ type: "boolean"
+ - name: "labels"
+ in: "query"
+ description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
+ type: "string"
+ - name: "networkmode"
+ in: "query"
+ description: |
+ Sets the networking mode for the run commands during build. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect.
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: "Platform in the format os[/arch[/variant]]"
+ type: "string"
+ default: ""
+ - name: "target"
+ in: "query"
+ description: "Target build stage"
+ type: "string"
+ default: ""
+ - name: "outputs"
+ in: "query"
+ description: "BuildKit output configuration"
+ type: "string"
+ default: ""
+ - name: "version"
+ in: "query"
+ type: "string"
+ default: "1"
+ enum: ["1", "2"]
+ description: |
+ Version of the builder backend to use.
+
+ - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
+ - `2` is [BuildKit](https://github.com/moby/buildkit)
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /build/prune:
+ post:
+ summary: "Delete builder cache"
+ produces:
+ - "application/json"
+ operationId: "BuildPrune"
+ parameters:
+ - name: "keep-storage"
+ in: "query"
+ description: |
+ Amount of disk space in bytes to keep for cache
+
+ > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space".
+ > It is kept for backward compatibility and will be removed in API v1.49.
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. 
+ type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +


+ + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
+ 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + 
- name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. 
+
+ For details on the format, see the [export image endpoint](#operation/ImageGet).
+ operationId: "ImageLoad"
+ consumes:
+ - "application/x-tar"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "imagesTarball"
+ in: "body"
+ description: "Tar archive containing images"
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "quiet"
+ in: "query"
+ description: "Suppress progress details during load."
+ type: "boolean"
+ default: false
+ - name: "platform"
+ type: "string"
+ in: "query"
+ description: |
+ JSON encoded OCI platform describing a platform which will be used
+ to select a platform-specific image to be loaded if the image is
+ multi-platform.
+ If not provided, the full multi-platform image will be loaded.
+
+ Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+ tags: ["Image"]
+ /containers/{id}/exec:
+ post:
+ summary: "Create an exec instance"
+ description: "Run a command inside a running container."
+ operationId: "ContainerExec"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IDResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execConfig"
+ in: "body"
+ description: "Exec configuration"
+ schema:
+ type: "object"
+ title: "ExecConfig"
+ properties:
+ AttachStdin:
+ type: "boolean"
+ description: "Attach to `stdin` of the exec command."
+ AttachStdout:
+ type: "boolean"
+ description: "Attach to `stdout` of the exec command."
+ AttachStderr:
+ type: "boolean"
+ description: "Attach to `stderr` of the exec command."
+ ConsoleSize:
+ type: "array"
+ description: "Initial console size, as an `[height, width]` array."
+ x-nullable: true
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ example: [80, 64]
+ DetachKeys:
+ type: "string"
+ description: |
+ Override the key sequence for detaching a container. Format is
+ a single character `[a-Z]` or `ctrl-<value>` where `<value>`
+ is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ Env:
+ description: |
+ A list of environment variables in the form `["VAR=value", ...]`.
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ type: "array"
+ description: "Command to run, as a string or array of strings."
+ items:
+ type: "string"
+ Privileged:
+ type: "boolean"
+ description: "Runs the exec process with extended privileges."
+ default: false
+ User:
+ type: "string"
+ description: |
+ The user, and optionally, group to run the exec process inside
+ the container. Format is one of: `user`, `user:group`, `uid`,
+ or `uid:gid`.
+ WorkingDir:
+ type: "string"
+ description: |
+ The working directory for the exec process inside the container.
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
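To show how the exec endpoints above fit together, here is a hedged Go sketch of the create → start → inspect cycle. The container name `mydb`, the fixed one-second wait, and the socket/version constants are assumptions; a real client would poll `Running` rather than sleep.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// 1. Create the exec instance; the response is an IDResponse.
	cfg, _ := json.Marshal(map[string]any{
		"AttachStdout": false,
		"Cmd":          []string{"touch", "/tmp/ping"},
	})
	resp, err := client.Post("http://localhost/v1.49/containers/mydb/exec",
		"application/json", bytes.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	var created struct{ Id string }
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()

	// 2. Start it detached; the endpoint returns immediately.
	start, _ := json.Marshal(map[string]any{"Detach": true})
	resp, err = client.Post("http://localhost/v1.49/exec/"+created.Id+"/start",
		"application/json", bytes.NewReader(start))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// 3. Inspect the instance for its exit code.
	time.Sleep(time.Second)
	resp, err = client.Get("http://localhost/v1.49/exec/" + created.Id + "/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var info struct {
		Running  bool
		ExitCode int
	}
	json.NewDecoder(resp.Body).Decode(&info)
	fmt.Printf("running=%v exit=%d\n", info.Running, info.ExitCode)
}
```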
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: "Update a volume. Valid only for Swarm cluster volumes"
+ operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability can + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network.
For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
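The `filters` parameter above is a JSON-encoded `map[string][]string` that must additionally be URL-escaped in the query string. A minimal Go sketch under the same socket and version assumptions as earlier:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Serialize the filter map to JSON, then URL-escape it.
	raw, _ := json.Marshal(map[string][]string{"driver": {"bridge"}})
	resp, err := client.Get("http://localhost/v1.49/networks?filters=" + url.QueryEscape(string(raw)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields this sketch needs from each Network.
	var networks []struct{ Name, Id, Driver string }
	json.NewDecoder(resp.Body).Decode(&networks)
	for _, n := range networks {
		fmt.Printf("%s (%s): %s\n", n.Name, n.Id[:12], n.Driver)
	}
}
```

The same encoding applies to every other `filters` parameter in this file.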
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
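As a usage sketch for the connect endpoint above: the network name `mynet` is an assumption, while the container ID and IPv4 address reuse the spec's own example values.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Attach the container to the network with a static IPv4 address.
	payload, _ := json.Marshal(map[string]any{
		"Container": "3613f73ba0e4",
		"EndpointConfig": map[string]any{
			"IPAMConfig": map[string]any{"IPv4Address": "172.24.56.89"},
		},
	})
	resp, err := client.Post("http://localhost/v1.49/networks/mynet/connect",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	// 200 on success; 403 if the network is swarm-scoped but not attachable.
}
```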
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (for example, `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details.
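A typical install flow chains the two plugin endpoints above: fetch the privileges the plugin requires, then submit that same array as the pull request body to grant them. A Go sketch; the `vieux/sshfs` plugin name is an illustrative assumption.

```go
package main

import (
	"bytes"
	"context"
	"io"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	remote := url.QueryEscape("vieux/sshfs:latest")

	// 1. Ask which privileges the plugin needs.
	resp, err := client.Get("http://localhost/v1.49/plugins/privileges?remote=" + remote)
	if err != nil {
		panic(err)
	}
	privileges, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// 2. Pull the plugin, granting exactly those privileges as the body
	//    (the response above is already the JSON array the pull expects).
	resp, err = client.Post("http://localhost/v1.49/plugins/pull?remote="+remote,
		"application/json", bytes.NewReader(privileges))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	// Enable the plugin afterwards via POST /plugins/{name}/enable.
}
```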
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=(accepted|pending)` + - `name=<node name>` + - `node.label=<node label>` + - `role=(manager|worker)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes.
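The `version` query parameter implements optimistic concurrency: read the object, reuse its `Version.Index` together with the current spec, and write the modified spec back. A Go sketch of draining a node; the node ID is a placeholder assumption.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	const node = "24ifsmvkjbyhk" // placeholder node ID

	// 1. Inspect the node to get its current Spec and version.
	resp, err := client.Get("http://localhost/v1.49/nodes/" + node)
	if err != nil {
		panic(err)
	}
	var n struct {
		Version struct{ Index int64 }
		Spec    map[string]any
	}
	json.NewDecoder(resp.Body).Decode(&n)
	resp.Body.Close()

	// 2. Change only what we mean to change, then write the spec back with
	//    the version we read; a concurrent update makes this call fail.
	n.Spec["Availability"] = "drain"
	body, _ := json.Marshal(n.Spec)
	u := fmt.Sprintf("http://localhost/v1.49/nodes/%s/update?version=%d", node, n.Version.Index)
	resp, err = client.Post(u, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```

The same read-version-then-update cycle applies to the swarm, service, secret, and config update endpoints later in this file.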
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
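A sketch of reading the task counters the `status` parameter adds. The `ServiceStatus` field names (`RunningTasks`, `DesiredTasks`) are an assumption about the referenced `Service` definition rather than something shown above.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// status=true asks the daemon to include per-service task counts.
	resp, err := client.Get("http://localhost/v1.49/services?status=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var services []struct {
		Spec          struct{ Name string }
		ServiceStatus struct{ RunningTasks, DesiredTasks uint64 }
	}
	json.NewDecoder(resp.Body).Decode(&services)
	for _, s := range services {
		fmt.Printf("%s: %d/%d tasks running\n",
			s.Spec.Name, s.ServiceStatus.RunningTasks, s.ServiceStatus.DesiredTasks)
	}
}
```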
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
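A hedged sketch of fetching recent service logs. The service name `web` reuses the create example earlier in this file; the socket and version constants are assumptions as before. For services without a TTY the body arrives in the multiplexed stream framing (an 8-byte header per frame), which this sketch passes through unparsed.

```go
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Last 50 stdout lines; add follow=true to keep streaming.
	resp, err := client.Get("http://localhost/v1.49/services/web/logs?stdout=true&tail=50")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Pass the (possibly multiplexed) log stream through to stdout.
	io.Copy(os.Stdout, resp.Body)
}
```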
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
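A hedged sketch of the secret create endpoint above. The secret name, label, and plaintext are illustrative assumptions; the `Data` value is base64-encoded to match the spec's example payloads (standard base64 is assumed here as the encoding variant).

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Data carries the secret bytes base64-encoded, as in the example above.
	payload, _ := json.Marshal(map[string]any{
		"Name":   "db-password",
		"Labels": map[string]string{"env": "dev"},
		"Data":   base64.StdEncoding.EncodeToString([]byte("s3cr3t")),
	})
	resp, err := client.Post("http://localhost/v1.49/secrets/create",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 201 returns an IDResponse with the new secret's ID.
	var created struct{ Id string }
	json.NewDecoder(resp.Body).Decode(&created)
	fmt.Println("created secret", created.Id)
}
```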
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/v1.49.yaml b/_vendor/github.com/moby/moby/docs/api/v1.49.yaml new file mode 100644 index 000000000000..2034fdefd990 --- /dev/null +++ b/_vendor/github.com/moby/moby/docs/api/v1.49.yaml @@ -0,0 +1,13536 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. 
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.49" +info: + title: "Docker Engine API" + version: "1.49" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.49) is used. + For example, calling `/info` is the same as calling `/v1.49/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent in the + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already obtained an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers.
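The `X-Registry-Auth` header described above is plain base64url over a JSON document, so constructing it is mechanical. A small standard-library sketch with placeholder credentials:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// registryAuth mirrors the JSON structure shown in the spec above.
type registryAuth struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	payload, err := json.Marshal(registryAuth{
		Username:      "jane",
		Password:      "s3cr3t",
		Email:         "jane@example.com",
		ServerAddress: "registry.example.com", // domain/IP without a protocol
	})
	if err != nil {
		log.Fatal(err)
	}
	// RFC 4648 section 5 base64url, as the header requires.
	fmt.Println("X-Registry-Auth:", base64.URLEncoding.EncodeToString(payload))
}
```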
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `image` a docker image + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. 
+ type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. 
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever-increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server.
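For reference, the `Mount` object above corresponds to `mount.Mount` in the Go API types. A minimal sketch, assuming `github.com/docker/docker/api/types/mount`; the target path and size are examples. It prints the JSON a client would send for a `tmpfs` mount:

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:   mount.TypeTmpfs, // for tmpfs, Source must stay empty
		Target: "/run",          // example container path
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024, // 64 MB
			Mode:      0o755,            // permission bits for the mount
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(m); err != nil {
		log.Fatal(err)
	}
}
```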
+ type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." 
+ type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. 
+ type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. 
+ type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as a `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options."
+ items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the user namespace mode for the container when the user + namespace remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set.
+ additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created.
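Putting a few of the `ContainerConfig` fields above together: a minimal sketch, assuming `github.com/docker/docker/api/types/container` and `github.com/docker/go-connections/nat`; the image and values are examples. It prints the creation payload a client would send:

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	cfg := container.Config{
		Image: "example-image:1.0",
		// "VAR=value" sets a variable; a bare "VAR" removes it from the
		// environment rather than setting it to an empty value.
		Env: []string{"APP_MODE=production"},
		Cmd: []string{"/bin/sh", "-c", "echo hello"},
		// Keys follow the <port>/<tcp|udp|sctp> format; values stay empty.
		ExposedPorts: nat.PortSet{
			"80/tcp": struct{}{},
		},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
}
```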
+ type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + example: "" + Domainname: + description: | + The domain name to use for the container. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + example: "" + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + AttachStdin: + description: | + Whether to attach to `stdin`. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + AttachStdout: + description: | + Whether to attach to `stdout`. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + AttachStderr: + description: | + Whether to attach to `stderr`. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + OpenStdin: + description: | + Open `stdin` + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + StdinOnce: + description: | + Close `stdin` after one attached client disconnects. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.50. + type: "string" + default: "" + example: "" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: | + Disable networking for the container. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "boolean" + default: false + example: false + x-nullable: true + MacAddress: + description: | + MAC address of the container. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "string" + default: "" + example: "" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: | + Timeout to stop a container in seconds. + +
+ <p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.50. + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "Hostname": "" + "Domainname": "" + "User": "web:web" + "AttachStdin": false + "AttachStdout": false + "AttachStderr": false + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Tty": false + "OpenStdin": false + "StdinOnce": false + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Image": "" + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. 
+ type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ <p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+
+          > **Deprecated**: This field is only propagated when attached to the
+          > default "bridge" network. Use the information from the "bridge"
+          > network inside the `Networks` map instead, which contains the same
+          > information. This field was deprecated in Docker 1.9 and is scheduled
+          > to be removed in Docker 17.12.0
+        type: "string"
+        example: "02:42:ac:11:00:04"
+      Networks:
+        description: |
+          Information about all networks that the container is connected to.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/EndpointSettings"
+
+  Address:
+    description: Address represents an IPv4 or IPv6 IP address.
+    type: "object"
+    properties:
+      Addr:
+        description: IP address.
+        type: "string"
+      PrefixLen:
+        description: Mask length of the IP address.
+        type: "integer"
+
+  PortMap:
+    description: |
+      PortMap describes the mapping of container ports to host ports, using the
+      container's port-number and protocol as key in the format
+      `<port>/<protocol>`, for example, `80/udp`.
+
+      If a container's port is mapped for multiple protocols, separate entries
+      are added to the mapping table.
+    type: "object"
+    additionalProperties:
+      type: "array"
+      x-nullable: true
+      items:
+        $ref: "#/definitions/PortBinding"
+    example:
+      "443/tcp":
+        - HostIp: "127.0.0.1"
+          HostPort: "4443"
+      "80/tcp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+        - HostIp: "0.0.0.0"
+          HostPort: "8080"
+      "80/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+      "53/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "53"
+      "2377/tcp": null
+
+  PortBinding:
+    description: |
+      PortBinding represents a binding between a host IP address and a host
+      port.
+    type: "object"
+    properties:
+      HostIp:
+        description: "Host IP address that the container's port is mapped to."
+        type: "string"
+        example: "127.0.0.1"
+      HostPort:
+        description: "Host port number that the container's port is mapped to."
+        type: "string"
+        example: "4443"
+
+  DriverData:
+    description: |
+      Information about the storage driver used to store the container's and
+      image's filesystem.
+    type: "object"
+    required: [Name, Data]
+    properties:
+      Name:
+        description: "Name of the storage driver."
+        type: "string"
+        x-nullable: false
+        example: "overlay2"
+      Data:
+        description: |
+          Low-level storage metadata, provided as key/value pairs.
+
+          This information is driver-specific, and depends on the storage-driver
+          in use, and should be used for informational purposes only.
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example: {
+          "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+          "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+          "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
+        }
+
+  FilesystemChange:
+    description: |
+      Change in the container's filesystem.
+    type: "object"
+    required: [Path, Kind]
+    properties:
+      Path:
+        description: |
+          Path to file or directory that has changed.
+        type: "string"
+        x-nullable: false
+      Kind:
+        $ref: "#/definitions/ChangeType"
+
+  ChangeType:
+    description: |
+      Kind of change
+
+      Can be one of:
+
+      - `0`: Modified ("C")
+      - `1`: Added ("A")
+      - `2`: Deleted ("D")
+    type: "integer"
+    format: "uint8"
+    enum: [0, 1, 2]
+    x-nullable: false
+
+  ImageInspect:
+    description: |
+      Information about an image in the local image cache.
+    type: "object"
+    properties:
+      Id:
+        description: |
+          ID is the content-addressable ID of an image.
+
+          This identifier is a content-addressable digest calculated from the
+          image's configuration (which includes the digests of layers used by
+          the image).
+
+          Note that this digest differs from the `RepoDigests` below, which
+          holds digests of image manifests that reference the image.
+        type: "string"
+        x-nullable: false
+        example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        x-nullable: true
+        $ref: "#/definitions/OCIDescriptor"
+      Manifests:
+        description: |
+          Manifests is a list of image manifests available in this image. It
+          provides a more detailed view of the platform-specific image manifests or
+          other image-attached data like build attestations.
+
+          Only available if the daemon provides a multi-platform image store
+          and the `manifests` option is set in the inspect request.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      RepoTags:
+        description: |
+          List of image names/tags in the local image cache that reference this
+          image.
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Parent:
+        description: |
+          ID of the parent image.
+
+          Depending on how the image was created, this field may be empty and
+          is only set for images that were built/created locally. This field
+          is empty if the image was pulled from an image registry.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Comment:
+        description: |
+          Optional message that was set when committing or importing the image.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Created:
+        description: |
+          Date and time at which the image was created, formatted in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+          This information is only available if present in the image,
+          and omitted otherwise.
+        type: "string"
+        format: "dateTime"
+        x-nullable: true
+        example: "2022-02-04T21:20:12.497794809Z"
+      DockerVersion:
+        description: |
+          The version of Docker that was used to build the image.
+
+          Depending on how the image was created, this field may be empty.
+ type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Created:
+        description: |
+          Date and time at which the image was created as a Unix timestamp
+          (number of seconds since EPOCH).
+        type: "integer"
+        x-nullable: false
+        example: 1644009612
+      Size:
+        description: |
+          Total size of the image including all layers it is composed of.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 172064416
+      SharedSize:
+        description: |
+          Total size of image layers that are shared between this image and other
+          images.
+
+          This size is not calculated by default. `-1` indicates that the value
+          has not been set / calculated.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 1239828
+      VirtualSize:
+        description: |-
+          Total size of the image including all layers it is composed of.
+
+          Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+        type: "integer"
+        format: "int64"
+        example: 172064416
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Containers:
+        description: |
+          Number of containers using this image. Includes both stopped and running
+          containers.
+
+          This value is not calculated by default, and depends on which API endpoint
+          is used. `-1` indicates that the value has not been set / calculated.
+        x-nullable: false
+        type: "integer"
+        example: 2
+      Manifests:
+        description: |
+          Manifests is a list of manifests available in this image.
+          It provides a more detailed view of the platform-specific image manifests
+          or other image-attached data like build attestations.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: false
+        x-omitempty: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+ x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." 
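+        # Illustrative sketch (not part of the spec): assuming the documented
+        # `POST /volumes/create` endpoint, a request body built from this
+        # schema could look like:
+        #
+        #   {
+        #     "Name": "tardis",
+        #     "Driver": "local",
+        #     "DriverOpts": {"type": "tmpfs", "device": "tmpfs", "o": "size=100m,uid=1000"},
+        #     "Labels": {"com.example.some-label": "some-value"}
+        #   }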
+        type: "string"
+        default: "local"
+        x-nullable: false
+        example: "custom"
+      DriverOpts:
+        description: |
+          A mapping of driver options and values. These options are
+          passed directly to the driver and are driver specific.
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          device: "tmpfs"
+          o: "size=100m,uid=1000"
+          type: "tmpfs"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      ClusterVolumeSpec:
+        $ref: "#/definitions/ClusterVolumeSpec"
+
+  VolumeListResponse:
+    type: "object"
+    title: "VolumeListResponse"
+    x-go-name: "ListResponse"
+    description: "Volume list response"
+    properties:
+      Volumes:
+        type: "array"
+        description: "List of volumes"
+        items:
+          $ref: "#/definitions/Volume"
+      Warnings:
+        type: "array"
+        description: |
+          Warnings that occurred when fetching the list of volumes.
+        items:
+          type: "string"
+        example: []
+
+  Network:
+    type: "object"
+    properties:
+      Name:
+        description: |
+          Name of the network.
+        type: "string"
+        example: "my_network"
+      Id:
+        description: |
+          ID that uniquely identifies a network on a single machine.
+        type: "string"
+        example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+      Created:
+        description: |
+          Date and time at which the network was created in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+        type: "string"
+        format: "dateTime"
+        example: "2016-10-19T04:33:30.360899459Z"
+      Scope:
+        description: |
+          The level at which the network exists (e.g. `swarm` for cluster-wide
+          or `local` for machine level)
+        type: "string"
+        example: "local"
+      Driver:
+        description: |
+          The name of the driver used to create the network (e.g. `bridge`,
+          `overlay`).
+        type: "string"
+        example: "overlay"
+      EnableIPv4:
+        description: |
+          Whether the network was created with IPv4 enabled.
+        type: "boolean"
+        example: true
+      EnableIPv6:
+        description: |
+          Whether the network was created with IPv6 enabled.
+        type: "boolean"
+        example: false
+      IPAM:
+        $ref: "#/definitions/IPAM"
+      Internal:
+        description: |
+          Whether the network is created to only allow internal networking
+          connectivity.
+        type: "boolean"
+        default: false
+        example: false
+      Attachable:
+        description: |
+          Whether a global / swarm scope network is manually attachable by regular
+          containers from workers in swarm mode.
+        type: "boolean"
+        default: false
+        example: false
+      Ingress:
+        description: |
+          Whether the network is providing the routing-mesh for the swarm cluster.
+        type: "boolean"
+        default: false
+        example: false
+      ConfigFrom:
+        $ref: "#/definitions/ConfigReference"
+      ConfigOnly:
+        description: |
+          Whether the network is a config-only network. Config-only networks are
+          placeholder networks for network configurations to be used by other
+          networks. Config-only networks cannot be used directly to run containers
+          or services.
+        type: "boolean"
+        default: false
+      Containers:
+        description: |
+          Contains endpoints attached to the network.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/NetworkContainer"
+        example:
+          19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+            Name: "test"
+            EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+            MacAddress: "02:42:ac:13:00:02"
+            IPv4Address: "172.19.0.2/16"
+            IPv6Address: ""
+      Options:
+        description: |
+          Network-specific options used when creating the network.
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.docker.network.bridge.default_bridge: "true"
+          com.docker.network.bridge.enable_icc: "true"
+          com.docker.network.bridge.enable_ip_masquerade: "true"
+          com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+          com.docker.network.bridge.name: "docker0"
+          com.docker.network.driver.mtu: "1500"
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Peers:
+        description: |
+          List of peer nodes for an overlay network. This field is only present
+          for overlay networks, and omitted for other network types.
+        type: "array"
+        items:
+          $ref: "#/definitions/PeerInfo"
+        x-nullable: true
+      # TODO: Add Services (only present when "verbose" is set).
+
+  ConfigReference:
+    description: |
+      The config-only network source to provide the configuration for
+      this network.
+    type: "object"
+    properties:
+      Network:
+        description: |
+          The name of the config-only network that provides the network's
+          configuration. The specified network must be an existing config-only
+          network. Only network names are allowed, not network IDs.
+        type: "string"
+        example: "config_only_network_01"
+
+  IPAM:
+    type: "object"
+    properties:
+      Driver:
+        description: "Name of the IPAM driver to use."
+        type: "string"
+        default: "default"
+        example: "default"
+      Config:
+        description: |
+          List of IPAM configuration options, specified as a map:
+
+          ```
+          {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+          ```
+        type: "array"
+        items:
+          $ref: "#/definitions/IPAMConfig"
+      Options:
+        description: "Driver-specific options, specified as a map."
+        type: "object"
+        additionalProperties:
+          type: "string"
+        example:
+          foo: "bar"
+
+  IPAMConfig:
+    type: "object"
+    properties:
+      Subnet:
+        type: "string"
+        example: "172.20.0.0/16"
+      IPRange:
+        type: "string"
+        example: "172.20.10.0/24"
+      Gateway:
+        type: "string"
+        example: "172.20.10.11"
+      AuxiliaryAddresses:
+        type: "object"
+        additionalProperties:
+          type: "string"
+
+  NetworkContainer:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+        example: "container_1"
+      EndpointID:
+        type: "string"
+        example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+      MacAddress:
+        type: "string"
+        example: "02:42:ac:13:00:02"
+      IPv4Address:
+        type: "string"
+        example: "172.19.0.2/16"
+      IPv6Address:
+        type: "string"
+        example: ""
+
+  PeerInfo:
+    description: |
+      PeerInfo represents one peer of an overlay network.
+    type: "object"
+    properties:
+      Name:
+        description:
+          ID of the peer-node in the Swarm cluster.
+        type: "string"
+        example: "6869d7c1732b"
+      IP:
+        description:
+          IP-address of the peer-node in the Swarm cluster.
+        type: "string"
+        example: "10.133.77.91"
+
+  NetworkCreateResponse:
+    description: "OK response to NetworkCreate operation"
+    type: "object"
+    title: "NetworkCreateResponse"
+    x-go-name: "CreateResponse"
+    required: [Id, Warning]
+    properties:
+      Id:
+        description: "The ID of the created network."
+        type: "string"
+        x-nullable: false
+        example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d"
+      Warning:
+        description: "Warnings encountered when creating the network"
+        type: "string"
+        x-nullable: false
+        example: ""
+
+  BuildInfo:
+    type: "object"
+    properties:
+      id:
+        type: "string"
+      stream:
+        type: "string"
+      error:
+        type: "string"
+        x-nullable: true
+        description: |-
+          errors encountered during the operation.
+ + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. 
+      progressDetail:
+        $ref: "#/definitions/ProgressDetail"
+
+  PushImageInfo:
+    type: "object"
+    properties:
+      error:
+        type: "string"
+        x-nullable: true
+        description: |-
+          errors encountered during the operation.
+
+
+          > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
+      errorDetail:
+        $ref: "#/definitions/ErrorDetail"
+      status:
+        type: "string"
+      progress:
+        type: "string"
+        x-nullable: true
+        description: |-
+          Progress is a pre-formatted presentation of progressDetail.
+
+
+          > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
+      progressDetail:
+        $ref: "#/definitions/ProgressDetail"
+
+  ErrorDetail:
+    type: "object"
+    properties:
+      code:
+        type: "integer"
+      message:
+        type: "string"
+
+  ProgressDetail:
+    type: "object"
+    properties:
+      current:
+        type: "integer"
+      total:
+        type: "integer"
+
+  ErrorResponse:
+    description: "Represents an error."
+    type: "object"
+    required: ["message"]
+    properties:
+      message:
+        description: "The error message."
+        type: "string"
+        x-nullable: false
+    example:
+      message: "Something went wrong."
+
+  IDResponse:
+    description: "Response to an API call that returns just an Id"
+    type: "object"
+    x-go-name: "IDResponse"
+    required: ["Id"]
+    properties:
+      Id:
+        description: "The id of the newly created object."
+        type: "string"
+        x-nullable: false
+
+  EndpointSettings:
+    description: "Configuration for a network endpoint."
+    type: "object"
+    properties:
+      # Configurations
+      IPAMConfig:
+        $ref: "#/definitions/EndpointIPAMConfig"
+      Links:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "container_1"
+          - "container_2"
+      MacAddress:
+        description: |
+          MAC address for the endpoint on this network. The network driver might ignore this parameter.
+        type: "string"
+        example: "02:42:ac:11:00:04"
+      Aliases:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "server_x"
+          - "server_y"
+      DriverOpts:
+        description: |
+          DriverOpts is a mapping of driver options and values. These options
+          are passed directly to the driver and are driver specific.
+        type: "object"
+        x-nullable: true
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      GwPriority:
+        description: |
+          This property determines which endpoint will provide the default
+          gateway for a container. The endpoint with the highest priority will
+          be used. If multiple endpoints have the same priority, endpoints are
+          lexicographically sorted based on their network name, and the one
+          that sorts first is picked.
+        type: "integer"
+        format: "int64"
+        example: 10
+
+      # Operational data
+      NetworkID:
+        description: |
+          Unique ID of the network.
+        type: "string"
+        example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+      EndpointID:
+        description: |
+          Unique ID for the service endpoint in a Sandbox.
+        type: "string"
+        example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+      Gateway:
+        description: |
+          Gateway address for this network.
+        type: "string"
+        example: "172.17.0.1"
+      IPAddress:
+        description: |
+          IPv4 address.
+        type: "string"
+        example: "172.17.0.4"
+      IPPrefixLen:
+        description: |
+          Mask length of the IPv4 address.
+        type: "integer"
+        example: 16
+      IPv6Gateway:
+        description: |
+          IPv6 gateway address.
+        type: "string"
+        example: "2001:db8:2::100"
+      GlobalIPv6Address:
+        description: |
+          Global IPv6 address.
+        type: "string"
+        example: "2001:db8::5689"
+      GlobalIPv6PrefixLen:
+        description: |
+          Mask length of the global IPv6 address.
+        type: "integer"
+        format: "int64"
+        example: 64
+      DNSNames:
+        description: |
+          List of all DNS names an endpoint has on a specific network. This
+          list is based on the container name, network aliases, container short
+          ID, and hostname.
+
+          These DNS names are non-fully qualified but can contain several dots.
+          You can get fully qualified DNS names by appending `.<network-name>`.
+          For instance, if the container name is `my.ctr` and the network is named
+          `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be
+          `my.ctr.testnet`.
+        type: array
+        items:
+          type: string
+        example: ["foobar", "server_x", "server_y", "my.ctr"]
+
+  EndpointIPAMConfig:
+    description: |
+      EndpointIPAMConfig represents an endpoint's IPAM configuration.
+    type: "object"
+    x-nullable: true
+    properties:
+      IPv4Address:
+        type: "string"
+        example: "172.20.30.33"
+      IPv6Address:
+        type: "string"
+        example: "2001:db8:abcd::3033"
+      LinkLocalIPs:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "169.254.34.68"
+          - "fe80::3468"
+
+  PluginMount:
+    type: "object"
+    x-nullable: false
+    required: [Name, Description, Settable, Source, Destination, Type, Options]
+    properties:
+      Name:
+        type: "string"
+        x-nullable: false
+        example: "some-mount"
+      Description:
+        type: "string"
+        x-nullable: false
+        example: "This is a mount that's used by the plugin."
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Source:
+        type: "string"
+        example: "/var/lib/docker/plugins/"
+      Destination:
+        type: "string"
+        x-nullable: false
+        example: "/mnt/state"
+      Type:
+        type: "string"
+        x-nullable: false
+        example: "bind"
+      Options:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "rbind"
+          - "rw"
+
+  PluginDevice:
+    type: "object"
+    required: [Name, Description, Settable, Path]
+    x-nullable: false
+    properties:
+      Name:
+        type: "string"
+        x-nullable: false
+      Description:
+        type: "string"
+        x-nullable: false
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Path:
+        type: "string"
+        example: "/dev/fuse"
+
+  PluginEnv:
+    type: "object"
+    x-nullable: false
+    required: [Name, Description, Settable, Value]
+    properties:
+      Name:
+        x-nullable: false
+        type: "string"
+      Description:
+        x-nullable: false
+        type: "string"
+      Settable:
+        type: "array"
+        items:
+          type: "string"
+      Value:
+        type: "string"
+
+  PluginInterfaceType:
+    type: "object"
+    x-nullable: false
+    required: [Prefix, Capability, Version]
+    properties:
+      Prefix:
+        type: "string"
+        x-nullable: false
+      Capability:
+        type: "string"
+        x-nullable: false
+      Version:
+        type: "string"
+        x-nullable: false
+
+  PluginPrivilege:
+    description: |
+      Describes a permission the user has to accept upon installing
+      the plugin.
+    type: "object"
+    x-go-name: "PluginPrivilege"
+    properties:
+      Name:
+        type: "string"
+        example: "network"
+      Description:
+        type: "string"
+      Value:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "host"
+
+  Plugin:
+    description: "A plugin for the Engine API"
+    type: "object"
+    required: [Settings, Enabled, Config, Name]
+    properties:
+      Id:
+        type: "string"
+        example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+      Name:
+        type: "string"
+        x-nullable: false
+        example: "tiborvass/sample-volume-plugin"
+      Enabled:
+        description:
+          True if the plugin is running. False if the plugin is not running,
+          only installed.
+ type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
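+              # Illustrative sketch (not normative): with the default (empty)
+              # protocol scheme, the daemon is assumed to reach the plugin over
+              # its UNIX socket (here `plugins.sock`, conventionally placed
+              # under /run/docker/plugins/), where, for example, a volume
+              # plugin answers HTTP POSTs such as
+              #   POST /VolumeDriver.Create  {"Name": "tardis", "Opts": {}}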
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
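+        # Illustrative flow (not normative): a NodeSpec update is submitted
+        # together with the last-read ObjectVersion to avoid conflicting
+        # writes, e.g.
+        #   GET  /nodes/24ifsmvkjbyhk                       -> Version.Index: 373531
+        #   POST /nodes/24ifsmvkjbyhk/update?version=373531 with the modified spec
+        # If the version no longer matches, the daemon rejects the update and
+        # the client should re-read the node and retry.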
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. 
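+  # Illustrative response shape (not normative): `GET /swarm` returns the
+  # Swarm object below, i.e. ClusterInfo plus JoinTokens, e.g.
+  #   {
+  #     "ID": "abajmipo7b4xz5ip2nrla6b11",
+  #     "Version": {"Index": 373531},
+  #     "Spec": {"Name": "default", ...},
+  #     "JoinTokens": {"Worker": "SWMTKN-1-...", "Manager": "SWMTKN-1-..."}
+  #   }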
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
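Putting the three mutually exclusive variants in context, a ContainerSpec fragment using the `File` variant might look like the following sketch (the image reference is a placeholder); `Registry` or `Config` would replace `File`, never accompany it:

```python
container_spec = {
    "Image": "mcr.microsoft.com/windows/servercore:ltsc2022",  # placeholder
    "Privileges": {
        "CredentialSpec": {
            # Resolves to C:\ProgramData\Docker\CredentialSpecs\spec.json
            "File": "spec.json",
        },
    },
}
```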
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. 
The + secret in the reference will be identified by its ID. + type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task. + +
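A sketch of two config references as described above, one with a `File` target and one with a `Runtime` target (IDs are placeholders; a runtime target is the kind referenced by `CredentialSpec.Config`):

```python
configs = [
    {
        "ConfigID": "ktnbjxoalbkvbvedmg1urrz8h",  # placeholder ID
        "ConfigName": "server.conf",
        "File": {"Name": "/etc/server.conf", "UID": "0", "GID": "0", "Mode": 0o444},
    },
    {
        "ConfigID": "0bt9dmxjvjiqermk6xrop3ekq",  # placeholder ID
        "ConfigName": "credspec",
        "Runtime": {},  # used by the task, not mounted into the container
    },
]
```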
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive. + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as are + supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +
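For illustration, a ContainerSpec fragment exercising the capability, sysctl, and ulimit fields described above (image and values are placeholders):

```python
hardened_spec = {
    "Image": "nginx:alpine",  # placeholder image
    "CapabilityDrop": ["CAP_NET_RAW"],  # drop from the default set
    "CapabilityAdd": ["CAP_NET_BIND_SERVICE"],  # add to the default set
    "Sysctls": {"net.core.somaxconn": "1024"},  # no cluster-wide suitability checks
    "Ulimits": [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
}
```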
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resource limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resource reservations." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Window is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + Label descriptor, such as `engine.labels.az`.
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +
+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. 
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to count the + number of executions. + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + Contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. 
+ type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint. 
+ Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:<id>`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options."
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). 
+ + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. 
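Clients conventionally turn the current (`cpu_stats`) and previous (`precpu_stats`) samples into a CPU percentage for Linux containers; a sketch of that calculation, assuming a decoded stats payload:

```python
def cpu_percent(stats: dict) -> float:
    """Derive a CPU usage percentage from one Linux stats sample.

    cpu_stats is the current sample; precpu_stats is the previous one."""
    cpu, precpu = stats["cpu_stats"], stats["precpu_stats"]
    cpu_delta = cpu["cpu_usage"]["total_usage"] - precpu["cpu_usage"]["total_usage"]
    system_delta = cpu.get("system_cpu_usage", 0) - precpu.get("system_cpu_usage", 0)
    # Prefer online_cpus; fall back to the cgroups v1 per-CPU list.
    ncpus = cpu.get("online_cpus") or len(cpu["cpu_usage"].get("percpu_usage") or [])
    if cpu_delta > 0 and system_delta > 0:
        return (cpu_delta / system_delta) * ncpus * 100.0
    return 0.0
```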
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "Container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only. Their content is not defined, and is not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version of Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty or false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates if IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +
+ + > **Deprecated**: The netfilter module is now loaded on-demand rather + > than during daemon startup, making this field obsolete. This field is + > always `false` and will be removed in API v1.49. + type: "boolean" + example: false + BridgeNfIp6tables: + description: | + Indicates if `bridge-nf-call-ip6tables` is available on the host. + +
+ + > **Deprecated**: The netfilter module is now loaded on-demand rather + > than during daemon startup, making this field obsolete. This field is + > always `false` and will be removed in API v1.49. + type: "boolean" + example: false + Debug: + description: | + Indicates if the daemon is running in debug mode, with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file descriptors in use by the daemon process. + + This information is only returned if debug mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nanosecond precision. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows, this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter". + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system. + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, which is automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shut down, + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optionally, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature might + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as the number of nodes and the expiration date is included.
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or as a dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 defines the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: 24 + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where Container Device Interface (CDI) + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that using CDI device injection requires the daemon to have + experimental features enabled. For non-experimental daemons, an empty + list is always returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon. Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver.
+ type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding plugin names + # registered with the docker daemon. It is used by the Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration overrides this behavior, allowing insecure communication + with registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs.
+ type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is secure, that is, not included in the + list of insecure registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities, + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (that is, Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optionally, path of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties might be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd` + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about the swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of IDs and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type.
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
+ properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. 
+ * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, empty-string group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` The volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly. + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional.
For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum. + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster. + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in-depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This is equal to the sum of the [Content] size and all the sizes in + the [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (for example, image config and layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest.
+ + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content. For example, the + image might still have unpacked data in use by some container even + when the distributable (compressed) content is already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers.
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example: + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} +
DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`.
For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` field contains the CPU statistics of the *previous* + read, and is used to calculate the CPU usage percentage. It is not an + exact copy of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil, then for compatibility with older daemons, the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set: + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the Docker CLI, + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats are output once and the + connection is then closed. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container."
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing the + container.
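A small Go sketch (an illustration, not part of the spec) of driving the stop endpoint above over the local Unix socket; `my-container`, the signal, and the timeout are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
)

func main() {
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	// Ask the daemon to send SIGTERM, then kill after 10 seconds.
	resp, err := client.Post(
		"http://localhost/containers/my-container/stop?signal=SIGTERM&t=10",
		"application/json", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case 204:
		fmt.Println("stopped")
	case 304:
		fmt.Println("already stopped")
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}
```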
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connection is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header identifies which stream the payload belongs to (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian.
+ + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`.
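The read loop described above translates almost mechanically into code. A Go sketch of the demultiplexer (an illustration, not part of the spec), fed here with one hand-built `stdout` frame:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"strings"
)

// demux reads the multiplexed stream format: an 8-byte header (stream
// type, three zero bytes, big-endian uint32 payload size) followed by
// the payload, repeated until EOF.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin (written on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			return fmt.Errorf("unknown stream type %d", header[0])
		}
		size := binary.BigEndian.Uint32(header[4:8])
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// A hand-built frame carrying "hi\n" on stdout, for demonstration.
	frame := string([]byte{1, 0, 0, 0, 0, 0, 0, 3}) + "hi\n"
	if err := demux(strings.NewReader(frame), os.Stdout, os.Stderr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```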
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then return the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exited." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64-encoded JSON object with some filesystem header information + about the path.
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64-encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + The `path` parameter is asserted to be a directory. If it exists as a file, a 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into." + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa.
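For illustration, a Go sketch that issues the HEAD request above and decodes the `X-Docker-Container-Path-Stat` header. The container name and path are placeholders, and the decoded object is treated as a generic JSON map rather than a typed struct:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os"
)

func main() {
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	req, _ := http.NewRequest("HEAD",
		"http://localhost/containers/my-container/archive?path=/etc/hostname", nil)
	resp, err := client.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	resp.Body.Close()

	// The stat header is a base64-encoded JSON object.
	raw, err := base64.StdEncoding.DecodeString(
		resp.Header.Get("X-Docker-Container-Path-Stat"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var stat map[string]any
	if err := json.Unmarshal(raw, &stat); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("%v\n", stat)
}
```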
+ type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the destination file or + directory + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`<image-name>[:<tag>]`) + - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) + - `until=<timestamp>` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
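The `filters` parameters used by the list and prune endpoints above are all encoded the same way: JSON-encode a `map[string][]string`, then URI-escape it. A minimal Go sketch, using containers/prune as the example (the filter values are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Filter name -> list of values, exactly the map[string][]string
	// shape the endpoints describe.
	filters := map[string][]string{
		"until": {"24h"},
		"label": {"env=dev"},
	}
	b, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}
	endpoint := "/containers/prune?filters=" + url.QueryEscape(string(b))
	fmt.Println(endpoint)
}
```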
+ + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting image's layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49.
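A short Go sketch (illustration only) of preparing the `buildargs` query parameter and the `X-Registry-Config` header described above; the image tag, registry, and credentials are placeholders:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// Build-time variables become a JSON map, URI-component-encoded into
	// the buildargs query parameter.
	buildArgs, _ := json.Marshal(map[string]string{"FOO": "bar"})
	query := "/build?t=myimage:latest&buildargs=" + url.QueryEscape(string(buildArgs))
	fmt.Println(query)

	// Per-registry credentials go in the X-Registry-Config header as a
	// base64-encoded JSON object. The credentials here are placeholders.
	registryConfig, _ := json.Marshal(map[string]map[string]string{
		"docker.example.com": {"username": "janedoe", "password": "hunter2"},
	})
	fmt.Println("X-Registry-Config:", base64.StdEncoding.EncodeToString(registryConfig))
}
```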
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details.
+ type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exist in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image."
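For illustration, a Go sketch that pulls a single tag through the create endpoint above over the local Unix socket and prints the progress stream; `alpine:latest` is just an example image, and the progress messages are assumed to arrive one JSON document per line:

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
)

func main() {
	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	// Pull one tag; omitting both tag and digest would pull all tags.
	resp, err := client.Post(
		"http://localhost/images/create?fromImage=alpine&tag=latest",
		"text/plain", nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	// The body is a stream of JSON progress messages.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
}
```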
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. 
+ type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
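A Go sketch (not part of the spec) of pushing through the push endpoint above with the required `X-Registry-Auth` header; the registry, image name, and credentials are placeholders, and the auth object is assumed to follow the shape shown in the authentication section:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os"
)

func main() {
	// The auth configuration is JSON, then base64url-encoded.
	// All values here are placeholders.
	auth, _ := json.Marshal(map[string]string{
		"username":      "janedoe",
		"password":      "hunter2",
		"serveraddress": "registry.example.com",
	})
	encoded := base64.URLEncoding.EncodeToString(auth)

	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	// The image must already be tagged with the registry name.
	req, _ := http.NewRequest("POST",
		"http://localhost/images/registry.example.com/myimage/push?tag=latest", nil)
	req.Header.Set("X-Registry-Auth", encoded)

	resp, err := client.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```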
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +
+ + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "busybox" + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that have at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..."
+ 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the created image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming."
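For illustration, a Go sketch that streams container start and stop events through the events endpoint above, with the `filters` parameter encoded as a JSON `map[string][]string` (see the `filters` description that follows):

```go
package main

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Only watch container start/stop events; values are examples.
	filters, _ := json.Marshal(map[string][]string{
		"type":  {"container"},
		"event": {"start", "stop"},
	})

	tr := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}
	client := &http.Client{Transport: tr}

	resp, err := client.Get("http://localhost/events?filters=" + url.QueryEscape(string(filters)))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	// Each event arrives as one JSON document on the stream.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
}
```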
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=<string>` config name or ID + - `container=<string>` container name or ID + - `daemon=<string>` daemon name or ID + - `event=<string>` event type + - `image=<string>` image name or ID + - `label=<string>` image or container label + - `network=<string>` network name or ID + - `node=<string>` node ID + - `plugin`=<string> plugin name or ID + - `scope`=<string> local or swarm + - `secret=<string>` secret name or ID + - `service=<string>` service name or ID + - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=<string>` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: +
- name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. 
+
+        For details on the format, see the [export image endpoint](#operation/ImageGet).
+      operationId: "ImageLoad"
+      consumes:
+        - "application/x-tar"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "no error"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "imagesTarball"
+          in: "body"
+          description: "Tar archive containing images"
+          schema:
+            type: "string"
+            format: "binary"
+        - name: "quiet"
+          in: "query"
+          description: "Suppress progress details during load."
+          type: "boolean"
+          default: false
+        - name: "platform"
+          type: "string"
+          in: "query"
+          description: |
+            JSON encoded OCI platform describing a platform which will be used
+            to select a platform-specific image to be loaded if the image is
+            multi-platform.
+            If not provided, the full multi-platform image will be loaded.
+
+            Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
+      tags: ["Image"]
+  /containers/{id}/exec:
+    post:
+      summary: "Create an exec instance"
+      description: "Run a command inside a running container."
+      operationId: "ContainerExec"
+      consumes:
+        - "application/json"
+      produces:
+        - "application/json"
+      responses:
+        201:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/IDResponse"
+        404:
+          description: "no such container"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such container: c2ada9df5af8"
+        409:
+          description: "container is paused"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "execConfig"
+          in: "body"
+          description: "Exec configuration"
+          schema:
+            type: "object"
+            title: "ExecConfig"
+            properties:
+              AttachStdin:
+                type: "boolean"
+                description: "Attach to `stdin` of the exec command."
+              AttachStdout:
+                type: "boolean"
+                description: "Attach to `stdout` of the exec command."
+              AttachStderr:
+                type: "boolean"
+                description: "Attach to `stderr` of the exec command."
+              ConsoleSize:
+                type: "array"
+                description: "Initial console size, as a `[height, width]` array."
+                x-nullable: true
+                minItems: 2
+                maxItems: 2
+                items:
+                  type: "integer"
+                  minimum: 0
+                example: [80, 64]
+              DetachKeys:
+                type: "string"
+                description: |
+                  Override the key sequence for detaching a container. Format is
+                  a single character `[a-Z]` or `ctrl-<value>` where `<value>`
+                  is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+              Tty:
+                type: "boolean"
+                description: "Allocate a pseudo-TTY."
+              Env:
+                description: |
+                  A list of environment variables in the form `["VAR=value", ...]`.
+                type: "array"
+                items:
+                  type: "string"
+              Cmd:
+                type: "array"
+                description: "Command to run, as a string or array of strings."
+                items:
+                  type: "string"
+              Privileged:
+                type: "boolean"
+                description: "Runs the exec process with extended privileges."
+                default: false
+              User:
+                type: "string"
+                description: |
+                  The user, and optionally, group to run the exec process inside
+                  the container. Format is one of: `user`, `user:group`, `uid`,
+                  or `uid:gid`.
+              WorkingDir:
+                type: "string"
+                description: |
+                  The working directory for the exec process inside the container.
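The `ExecConfig` schema above maps onto a two-step workflow: create the exec instance, then start it with `/exec/{id}/start`. As a minimal sketch (not part of the spec), assuming a default Linux daemon socket at `/var/run/docker.sock` and a running container named `web`:

```go
// Sketch: create an exec instance with a subset of the ExecConfig fields.
// Socket path and container name are illustrative assumptions.
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	// Reach the daemon over its Unix socket; the host in the URL is ignored.
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	cfg := []byte(`{"AttachStdout": true, "AttachStderr": true, "Cmd": ["date"]}`)
	resp, err := client.Post("http://localhost/containers/web/exec",
		"application/json", bytes.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expect 201 and {"Id": "..."}
}
```

The `Id` in the 201 response is the exec instance ID that the start, resize, and inspect endpoints below take in their path.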
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+          examples:
+            application/json:
+              CanRemove: false
+              ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
+              DetachKeys: ""
+              ExitCode: 2
+              ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
+              OpenStderr: true
+              OpenStdin: true
+              OpenStdout: true
+              ProcessConfig:
+                arguments:
+                  - "-c"
+                  - "exit 2"
+                entrypoint: "sh"
+                privileged: false
+                tty: true
+                user: "1000"
+              Running: false
+              Pid: 42000
+        404:
+          description: "No such exec instance"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "Exec instance ID"
+          required: true
+          type: "string"
+      tags: ["Exec"]
+
+  /volumes:
+    get:
+      summary: "List volumes"
+      operationId: "VolumeList"
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "Summary volume data that matches the query"
+          schema:
+            $ref: "#/definitions/VolumeListResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            JSON encoded value of the filters (a `map[string][]string`) to
+            process on the volumes list. Available filters:
+
+            - `dangling=<boolean>` When set to `true` (or `1`), returns all
+              volumes that are not in use by a container. When set to `false`
+              (or `0`), only volumes that are in use by one or more
+              containers are returned.
+            - `driver=<volume-driver-name>` Matches volumes based on their driver.
+            - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+              the presence of a `label` alone or a `label` and a value.
+            - `name=<volume-name>` Matches all or part of a volume name.
+          type: "string"
+          format: "json"
+      tags: ["Volume"]
+
+  /volumes/create:
+    post:
+      summary: "Create a volume"
+      operationId: "VolumeCreate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        201:
+          description: "The volume was created successfully"
+          schema:
+            $ref: "#/definitions/Volume"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "volumeConfig"
+          in: "body"
+          required: true
+          description: "Volume configuration"
+          schema:
+            $ref: "#/definitions/VolumeCreateOptions"
+      tags: ["Volume"]
+
+  /volumes/{name}:
+    get:
+      summary: "Inspect a volume"
+      operationId: "VolumeInspect"
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "No error"
+          schema:
+            $ref: "#/definitions/Volume"
+        404:
+          description: "No such volume"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          required: true
+          description: "Volume name or ID"
+          type: "string"
+      tags: ["Volume"]
+
+    put:
+      summary: "Update a volume. Valid only for Swarm cluster volumes"
+      operationId: "VolumeUpdate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such volume"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          description: "The name or ID of the volume"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            # Though the schema for this endpoint is an object that contains only
+            # a ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object
+            # means that if, later on, we support things like changing the
+            # labels, we can do so without duplicating that information to the
+            # ClusterVolumeSpec.
+            type: "object"
+            description: "Volume configuration"
+            properties:
+              Spec:
+                $ref: "#/definitions/ClusterVolumeSpec"
+                description: |
+                  The spec of the volume to update. Currently, only Availability
+                  can change. All other fields must remain unchanged.
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the volume being updated. This is required to
+            avoid conflicting writes. Found in the volume's `ClusterVolume`
+            field.
+          type: "integer"
+          format: "int64"
+          required: true
+      tags: ["Volume"]
+
+    delete:
+      summary: "Remove a volume"
+      description: "Instruct the driver to remove the volume."
+      operationId: "VolumeDelete"
+      responses:
+        204:
+          description: "The volume was removed"
+        404:
+          description: "No such volume or volume driver"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        409:
+          description: "Volume is in use and cannot be removed"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          required: true
+          description: "Volume name or ID"
+          type: "string"
+        - name: "force"
+          in: "query"
+          description: "Force the removal of the volume"
+          type: "boolean"
+          default: false
+      tags: ["Volume"]
+
+  /volumes/prune:
+    post:
+      summary: "Delete unused volumes"
+      produces:
+        - "application/json"
+      operationId: "VolumePrune"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+            Available filters:
+            - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
+            - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes.
+          type: "string"
+      responses:
+        200:
+          description: "No error"
+          schema:
+            type: "object"
+            title: "VolumePruneResponse"
+            properties:
+              VolumesDeleted:
+                description: "Volumes that were deleted"
+                type: "array"
+                items:
+                  type: "string"
+              SpaceReclaimed:
+                description: "Disk space reclaimed in bytes"
+                type: "integer"
+                format: "int64"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Volume"]
+  /networks:
+    get:
+      summary: "List networks"
+      description: |
+        Returns a list of networks. For details on the format, see the
+        [network inspect endpoint](#operation/NetworkInspect).
+
+        Note that it uses a different, smaller representation of a network than
+        inspecting a single network.
+        For example, the list of containers attached to the network is not
+        propagated in API versions 1.28 and up.
+      operationId: "NetworkList"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "No error"
+          schema:
+            type: "array"
+            items:
+              $ref: "#/definitions/Network"
+          examples:
+            application/json:
+              - Name: "bridge"
+                Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
+                Created: "2016-10-19T06:21:00.416543526Z"
+                Scope: "local"
+                Driver: "bridge"
+                EnableIPv4: true
+                EnableIPv6: false
+                Internal: false
+                Attachable: false
+                Ingress: false
+                IPAM:
+                  Driver: "default"
+                  Config:
+                    -
+                      Subnet: "172.17.0.0/16"
+                Options:
+                  com.docker.network.bridge.default_bridge: "true"
+                  com.docker.network.bridge.enable_icc: "true"
+                  com.docker.network.bridge.enable_ip_masquerade: "true"
+                  com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+                  com.docker.network.bridge.name: "docker0"
+                  com.docker.network.driver.mtu: "1500"
+              - Name: "none"
+                Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
+                Created: "0001-01-01T00:00:00Z"
+                Scope: "local"
+                Driver: "null"
+                EnableIPv4: false
+                EnableIPv6: false
+                Internal: false
+                Attachable: false
+                Ingress: false
+                IPAM:
+                  Driver: "default"
+                  Config: []
+                Containers: {}
+                Options: {}
+              - Name: "host"
+                Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
+                Created: "0001-01-01T00:00:00Z"
+                Scope: "local"
+                Driver: "host"
+                EnableIPv4: false
+                EnableIPv6: false
+                Internal: false
+                Attachable: false
+                Ingress: false
+                IPAM:
+                  Driver: "default"
+                  Config: []
+                Containers: {}
+                Options: {}
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            JSON encoded value of the filters (a `map[string][]string`) to process
+            on the networks list.
+
+            Available filters:
+
+            - `dangling=<boolean>` When set to `true` (or `1`), returns all
+              networks that are not in use by a container. When set to `false`
+              (or `0`), only networks that are in use by one or more
+              containers are returned.
+            - `driver=<driver-name>` Matches a network's driver.
+            - `id=<network-id>` Matches all or part of a network ID.
+            - `label=<key>` or `label=<key>=<value>` of a network label.
+            - `name=<network-name>` Matches all or part of a network name.
+            - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
+            - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
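The `filters` parameter here, as on the other list endpoints, is a JSON-encoded `map[string][]string` passed in the query string. A minimal sketch of building it, assuming a default Linux daemon socket and an illustrative `com.example.team` label:

```go
// Sketch: query GET /networks with a JSON-encoded filters parameter.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
)

func main() {
	// filters is a map[string][]string, marshaled to JSON, then URL-escaped.
	filters, _ := json.Marshal(map[string][]string{
		"driver": {"bridge"},
		"label":  {"com.example.team=frontend"},
	})

	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Get("http://localhost/networks?filters=" + url.QueryEscape(string(filters)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON array of matching networks
}
```

The same encode-then-escape pattern applies to the `filters` parameter on the volume, plugin, node, service, and task list endpoints.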
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
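As a rough illustration of the `NetworkConnectRequest` body above (a sketch, not part of the spec; the network name `my_network`, container name `web`, and address are assumptions):

```go
// Sketch: connect a running container to a user-defined network with a
// static IPv4 address, via POST /networks/{id}/connect.
package main

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// Body shape follows the NetworkConnectRequest schema documented above.
	body := []byte(`{
	  "Container": "web",
	  "EndpointConfig": {"IPAMConfig": {"IPv4Address": "172.24.56.89"}}
	}`)
	resp, err := client.Post("http://localhost/networks/my_network/connect",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK when the connection succeeds
}
```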
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+
+            Available filters:
+            - `id=<node id>`
+            - `label=<engine label>`
+            - `membership=(accepted|pending)`
+            - `name=<node name>`
+            - `node.label=<node label>`
+            - `role=(manager|worker)`
+          type: "string"
+      tags: ["Node"]
+  /nodes/{id}:
+    get:
+      summary: "Inspect a node"
+      operationId: "NodeInspect"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/Node"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the node"
+          type: "string"
+          required: true
+      tags: ["Node"]
+    delete:
+      summary: "Delete a node"
+      operationId: "NodeDelete"
+      responses:
+        200:
+          description: "no error"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the node"
+          type: "string"
+          required: true
+        - name: "force"
+          in: "query"
+          description: "Force remove a node from the swarm"
+          default: false
+          type: "boolean"
+      tags: ["Node"]
+  /nodes/{id}/update:
+    post:
+      summary: "Update a node"
+      operationId: "NodeUpdate"
+      responses:
+        200:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID of the node"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            $ref: "#/definitions/NodeSpec"
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the node object being updated. This is required
+            to avoid conflicting writes.
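The `version` parameter implements optimistic concurrency: read the object, reuse its version when writing back. A sketch of that read-modify-write cycle (the node ID and the `Availability` change are illustrative; field names follow the `Node` object's `Version.Index` as returned by the inspect endpoint above):

```go
// Sketch: drain a node, passing the version read from NodeInspect.
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/var/run/docker.sock")
		},
	}}

	// 1. Inspect the node to read its current Version.Index.
	resp, err := client.Get("http://localhost/nodes/24ifsmvkjbyhk")
	if err != nil {
		panic(err)
	}
	var node struct {
		Version struct{ Index uint64 }
		Spec    map[string]any
	}
	if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
		panic(err)
	}
	resp.Body.Close()
	if node.Spec == nil {
		panic("unexpected response; is this daemon a swarm manager?")
	}

	// 2. Change the spec and post it back with the version just read.
	node.Spec["Availability"] = "drain"
	body, _ := json.Marshal(node.Spec)
	endpoint := fmt.Sprintf("http://localhost/nodes/24ifsmvkjbyhk/update?version=%d",
		node.Version.Index)
	resp, err = client.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(resp.Status) // 200 unless the version was already stale
}
```

The swarm, service, and volume update endpoints in this file take the same `version` query parameter and follow the same cycle.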
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=<task id>` + - `label=key` or `label="key=value"` + - `name=<task name>` + - `node=<node id or name>` + - `service=<service name>` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs."
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values.
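+ + For example, a sketch of an update body that changes only the labels, sent together with the required `version` query parameter (values are placeholders; carry over any other fields exactly as returned by the [SecretInspect endpoint](#operation/SecretInspect)): + + ``` + {"Name": "app-dev.crt", "Labels": {"foo": "baz"}} + ```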
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=<config id>` + - `label=<key> or label=<key>=value` + - `name=<config name>` + - `names=<config name>` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: +
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. A session allows the + server to call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection and upgrades it to an HTTP/2 + transport that allows the client to expose gRPC services on that + connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response, followed by + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/v1.50.yaml b/_vendor/github.com/moby/moby/docs/api/v1.50.yaml new file mode 100644 index 000000000000..21f77d2ff6ab --- /dev/null +++ b/_vendor/github.com/moby/moby/docs/api/v1.50.yaml @@ -0,0 +1,13432 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun.
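+# For example, the operation that lists containers has operationId +# "ContainerList", and the operation that creates a network has operationId +# "NetworkCreate".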
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.50" +info: + title: "Docker Engine API" + version: "1.50" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.50) is used. + For example, calling `/info` is the same as calling `/v1.50/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you already have an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `image` a docker image + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. 
type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma-separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount, as an integer." + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever-increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server.
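+ + For example, a sketch of a policy that restarts a failing container at most three times: + + ``` + {"Name": "on-failure", "MaximumRetryCount": 3} + ```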
+ type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (`0-3`, `0,1`). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container."
+ type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "A list of cgroup rules to apply to the container." + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10<sup>-9</sup> CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)." + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited.
+ type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g., `SSD=3`) or + String resources (e.g., `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results.
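+ + For example, a sketch of a typical value for a container that is passing its healthcheck: + + ``` + {"Status": "healthy", "FailingStreak": 0, "Log": []} + ```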
+ type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
+ This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options."
+ items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the user namespace mode for the container when the user + namespace remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set.
+ additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created.
+ type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in."
+ type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "User": "web:web" + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. 
+ type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. + type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+
+        > **Deprecated**: This field is only propagated when attached to the
+        > default "bridge" network. Use the information from the "bridge"
+        > network inside the `Networks` map instead, which contains the same
+        > information. This field was deprecated in Docker 1.9 and is scheduled
+        > to be removed in Docker 17.12.0
+        type: "string"
+        example: "02:42:ac:11:00:04"
+      Networks:
+        description: |
+          Information about all networks that the container is connected to.
+        type: "object"
+        additionalProperties:
+          $ref: "#/definitions/EndpointSettings"
+
+  Address:
+    description: Address represents an IPv4 or IPv6 IP address.
+    type: "object"
+    properties:
+      Addr:
+        description: IP address.
+        type: "string"
+      PrefixLen:
+        description: Mask length of the IP address.
+        type: "integer"
+
+  PortMap:
+    description: |
+      PortMap describes the mapping of container ports to host ports, using the
+      container's port-number and protocol as key in the format
+      `<port>/<protocol>`, for example, `80/udp`.
+
+      If a container's port is mapped for multiple protocols, separate entries
+      are added to the mapping table.
+    type: "object"
+    additionalProperties:
+      type: "array"
+      x-nullable: true
+      items:
+        $ref: "#/definitions/PortBinding"
+    example:
+      "443/tcp":
+        - HostIp: "127.0.0.1"
+          HostPort: "4443"
+      "80/tcp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+        - HostIp: "0.0.0.0"
+          HostPort: "8080"
+      "80/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "80"
+      "53/udp":
+        - HostIp: "0.0.0.0"
+          HostPort: "53"
+      "2377/tcp": null
+
+  PortBinding:
+    description: |
+      PortBinding represents a binding between a host IP address and a host
+      port.
+    type: "object"
+    properties:
+      HostIp:
+        description: "Host IP address that the container's port is mapped to."
+        type: "string"
+        example: "127.0.0.1"
+      HostPort:
+        description: "Host port number that the container's port is mapped to."
+        type: "string"
+        example: "4443"
+
+  DriverData:
+    description: |
+      Information about the storage driver used to store the container's and
+      image's filesystem.
+    type: "object"
+    required: [Name, Data]
+    properties:
+      Name:
+        description: "Name of the storage driver."
+        type: "string"
+        x-nullable: false
+        example: "overlay2"
+      Data:
+        description: |
+          Low-level storage metadata, provided as key/value pairs.
+
+          This information is driver-specific. It depends on the storage
+          driver in use and should be used for informational purposes only.
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example: {
+          "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+          "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+          "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
+        }
+
+  FilesystemChange:
+    description: |
+      Change in the container's filesystem.
+    type: "object"
+    required: [Path, Kind]
+    properties:
+      Path:
+        description: |
+          Path to file or directory that has changed.
+        type: "string"
+        x-nullable: false
+      Kind:
+        $ref: "#/definitions/ChangeType"
+
+  ChangeType:
+    description: |
+      Kind of change
+
+      Can be one of:
+
+      - `0`: Modified ("C")
+      - `1`: Added ("A")
+      - `2`: Deleted ("D")
+    type: "integer"
+    format: "uint8"
+    enum: [0, 1, 2]
+    x-nullable: false
+
+  ImageInspect:
+    description: |
+      Information about an image in the local image cache.
+    type: "object"
+    properties:
+      Id:
+        description: |
+          ID is the content-addressable ID of an image.
+
+          This identifier is a content-addressable digest calculated from the
+          image's configuration (which includes the digests of layers used by
+          the image).
+
+          Note that this digest differs from the `RepoDigests` below, which
+          holds digests of image manifests that reference the image.
+        type: "string"
+        x-nullable: false
+        example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        x-nullable: true
+        $ref: "#/definitions/OCIDescriptor"
+      Manifests:
+        description: |
+          Manifests is a list of image manifests available in this image. It
+          provides a more detailed view of the platform-specific image manifests or
+          other image-attached data like build attestations.
+
+          Only available if the daemon provides a multi-platform image store
+          and the `manifests` option is set in the inspect request.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      RepoTags:
+        description: |
+          List of image names/tags in the local image cache that reference this
+          image.
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Parent:
+        description: |
+          ID of the parent image.
+
+          Depending on how the image was created, this field may be empty and
+          is only set for images that were built/created locally. This field
+          is empty if the image was pulled from an image registry.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Comment:
+        description: |
+          Optional message that was set when committing or importing the image.
+        type: "string"
+        x-nullable: false
+        example: ""
+      Created:
+        description: |
+          Date and time at which the image was created, formatted in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+          This information is only available if present in the image,
+          and omitted otherwise.
+        type: "string"
+        format: "dateTime"
+        x-nullable: true
+        example: "2022-02-04T21:20:12.497794809Z"
+      DockerVersion:
+        description: |
+          The version of Docker that was used to build the image.
+
+          Depending on how the image was created, this field may be empty.
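+  # Editor's illustration (not part of the upstream spec): an image ID is a
+  # digest over the image's configuration, as described above. A minimal Go
+  # sketch of computing a `sha256:` digest over raw config bytes; real engines
+  # canonicalize the configuration JSON first, so treat this as a simplification.
+  #
+  #     package main
+  #
+  #     import (
+  #         "crypto/sha256"
+  #         "fmt"
+  #     )
+  #
+  #     func main() {
+  #         config := []byte(`{"architecture":"amd64","os":"linux"}`)
+  #         sum := sha256.Sum256(config)
+  #         fmt.Printf("sha256:%x\n", sum) // content-addressable ID
+  #     }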
+ type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+
+          Multiple image tags can refer to the same image, and this list may be
+          empty if no tags reference the image, in which case the image is
+          "untagged". An untagged image can still be referenced by its ID.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example:1.0"
+          - "example:latest"
+          - "example:stable"
+          - "internal.registry.example.com:5000/example:1.0"
+      RepoDigests:
+        description: |
+          List of content-addressable digests of locally available image manifests
+          that the image is referenced from. Multiple manifests can refer to the
+          same image.
+
+          These digests are usually only available if the image was either pulled
+          from a registry, or if the image was pushed to a registry, which is when
+          the manifest is generated and its digest calculated.
+        type: "array"
+        x-nullable: false
+        items:
+          type: "string"
+        example:
+          - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+          - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+      Created:
+        description: |
+          Date and time at which the image was created as a Unix timestamp
+          (number of seconds since EPOCH).
+        type: "integer"
+        x-nullable: false
+        example: 1644009612
+      Size:
+        description: |
+          Total size of the image including all layers it is composed of.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 172064416
+      SharedSize:
+        description: |
+          Total size of image layers that are shared between this image and other
+          images.
+
+          This size is not calculated by default. `-1` indicates that the value
+          has not been set / calculated.
+        type: "integer"
+        format: "int64"
+        x-nullable: false
+        example: 1239828
+      VirtualSize:
+        description: |-
+          Total size of the image including all layers it is composed of.
+
+          Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+        type: "integer"
+        format: "int64"
+        example: 172064416
+      Labels:
+        description: "User-defined key/value metadata."
+        type: "object"
+        x-nullable: false
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      Containers:
+        description: |
+          Number of containers using this image. Includes both stopped and running
+          containers.
+
+          This size is not calculated by default, and depends on which API endpoint
+          is used. `-1` indicates that the value has not been set / calculated.
+        x-nullable: false
+        type: "integer"
+        example: 2
+      Manifests:
+        description: |
+          Manifests is a list of manifests available in this image.
+          It provides a more detailed view of the platform-specific image manifests
+          or other image-attached data like build attestations.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
+        type: "array"
+        x-nullable: false
+        x-omitempty: true
+        items:
+          $ref: "#/definitions/ImageManifestSummary"
+      Descriptor:
+        description: |
+          Descriptor is an OCI descriptor of the image target.
+          In case of a multi-platform image, this descriptor points to the OCI index
+          or a manifest list.
+
+          This field is only present if the daemon provides a multi-platform image store.
+
+          WARNING: This is experimental and may change at any time without any backward
+          compatibility.
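+  # Editor's illustration (not part of the upstream spec): interpreting the
+  # `Created` Unix timestamp and the `-1` sentinel used by `SharedSize` and
+  # `Containers` -- a minimal Go sketch using the example values above.
+  #
+  #     package main
+  #
+  #     import (
+  #         "fmt"
+  #         "time"
+  #     )
+  #
+  #     func main() {
+  #         created := time.Unix(1644009612, 0).UTC()
+  #         fmt.Println(created.Format(time.RFC3339)) // 2022-02-04T21:20:12Z
+  #
+  #         sharedSize := int64(-1)
+  #         if sharedSize == -1 {
+  #             fmt.Println("shared size not calculated for this request")
+  #         }
+  #     }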
+ x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." 
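+  # Editor's illustration (not part of the upstream spec): a volume-create
+  # request body built from the `VolumeCreateOptions` fields defined here -- a
+  # minimal Go sketch; the values are the examples from this file.
+  #
+  #     package main
+  #
+  #     import (
+  #         "encoding/json"
+  #         "fmt"
+  #     )
+  #
+  #     func main() {
+  #         body := map[string]any{
+  #             "Name":   "tardis",
+  #             "Driver": "local",
+  #             "DriverOpts": map[string]string{
+  #                 "type": "tmpfs", "device": "tmpfs", "o": "size=100m,uid=1000",
+  #             },
+  #             "Labels": map[string]string{"com.example.some-label": "some-value"},
+  #         }
+  #         out, _ := json.Marshal(body)
+  #         fmt.Println(string(out))
+  #     }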
+ type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + example: false + IPAM: + $ref: "#/definitions/IPAM" + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + description: | + Network-specific options uses when creating the network. 
+ type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + example: "container_1" + EndpointID: + type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + example: "02:42:ac:13:00:02" + IPv4Address: + type: "string" + example: "172.19.0.2/16" + IPv6Address: + type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. 
+ + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. 
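+  # Editor's illustration (not part of the upstream spec): summarizing
+  # `BuildCache` records -- a minimal Go sketch that totals disk usage while
+  # skipping records that are in use, mirroring the fields described above.
+  #
+  #     package main
+  #
+  #     import "fmt"
+  #
+  #     type BuildCache struct {
+  #         ID    string
+  #         Type  string
+  #         InUse bool
+  #         Size  int64 // bytes
+  #     }
+  #
+  #     func main() {
+  #         records := []BuildCache{
+  #             {ID: "ndlpt0hhvkqcdfkputsk4cq9c", Type: "regular", InUse: false, Size: 51},
+  #             {ID: "hw53o5aio51xtltp5xjp8v7fx", Type: "exec.cachemount", InUse: true, Size: 4096},
+  #         }
+  #         var reclaimable int64
+  #         for _, r := range records {
+  #             if !r.InUse {
+  #                 reclaimable += r.Size
+  #             }
+  #         }
+  #         fmt.Printf("reclaimable: %d bytes\n", reclaimable)
+  #     }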
+      progressDetail:
+        $ref: "#/definitions/ProgressDetail"
+
+  PushImageInfo:
+    type: "object"
+    properties:
+      error:
+        type: "string"
+        x-nullable: true
+        description: |-
+          errors encountered during the operation.
+
+
+          > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead.
+      errorDetail:
+        $ref: "#/definitions/ErrorDetail"
+      status:
+        type: "string"
+      progress:
+        type: "string"
+        x-nullable: true
+        description: |-
+          Progress is a pre-formatted presentation of progressDetail.
+
+
+          > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead.
+      progressDetail:
+        $ref: "#/definitions/ProgressDetail"
+
+  DeviceInfo:
+    type: "object"
+    description: |
+      DeviceInfo represents a device that can be used by a container.
+    properties:
+      Source:
+        type: "string"
+        example: "cdi"
+        description: |
+          The origin device driver.
+      ID:
+        type: "string"
+        example: "vendor.com/gpu=0"
+        description: |
+          The unique identifier for the device within its source driver.
+          For CDI devices, this would be an FQDN like "vendor.com/gpu=0".
+
+  ErrorDetail:
+    type: "object"
+    properties:
+      code:
+        type: "integer"
+      message:
+        type: "string"
+
+  ProgressDetail:
+    type: "object"
+    properties:
+      current:
+        type: "integer"
+      total:
+        type: "integer"
+
+  ErrorResponse:
+    description: "Represents an error."
+    type: "object"
+    required: ["message"]
+    properties:
+      message:
+        description: "The error message."
+        type: "string"
+        x-nullable: false
+    example:
+      message: "Something went wrong."
+
+  IDResponse:
+    description: "Response to an API call that returns just an Id"
+    type: "object"
+    x-go-name: "IDResponse"
+    required: ["Id"]
+    properties:
+      Id:
+        description: "The id of the newly created object."
+        type: "string"
+        x-nullable: false
+
+  EndpointSettings:
+    description: "Configuration for a network endpoint."
+    type: "object"
+    properties:
+      # Configurations
+      IPAMConfig:
+        $ref: "#/definitions/EndpointIPAMConfig"
+      Links:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "container_1"
+          - "container_2"
+      MacAddress:
+        description: |
+          MAC address for the endpoint on this network. The network driver might ignore this parameter.
+        type: "string"
+        example: "02:42:ac:11:00:04"
+      Aliases:
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "server_x"
+          - "server_y"
+      DriverOpts:
+        description: |
+          DriverOpts is a mapping of driver options and values. These options
+          are passed directly to the driver and are driver specific.
+        type: "object"
+        x-nullable: true
+        additionalProperties:
+          type: "string"
+        example:
+          com.example.some-label: "some-value"
+          com.example.some-other-label: "some-other-value"
+      GwPriority:
+        description: |
+          This property determines which endpoint will provide the default
+          gateway for a container. The endpoint with the highest priority will
+          be used. If multiple endpoints have the same priority, endpoints are
+          lexicographically sorted based on their network name, and the one
+          that sorts first is picked.
+        type: "integer"
+        format: "int64"
+        example: 10
+
+      # Operational data
+      NetworkID:
+        description: |
+          Unique ID of the network.
+        type: "string"
+        example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+      EndpointID:
+        description: |
+          Unique ID for the service endpoint in a Sandbox.
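+  # Editor's illustration (not part of the upstream spec): the default-gateway
+  # selection described for `GwPriority` -- highest priority wins, ties broken
+  # by lexicographic network name. A minimal Go sketch.
+  #
+  #     package main
+  #
+  #     import (
+  #         "fmt"
+  #         "sort"
+  #     )
+  #
+  #     type endpoint struct {
+  #         network  string
+  #         priority int64
+  #     }
+  #
+  #     func main() {
+  #         eps := []endpoint{
+  #             {"backend_nw", 0},
+  #             {"frontend_nw", 10},
+  #             {"admin_nw", 10},
+  #         }
+  #         sort.Slice(eps, func(i, j int) bool {
+  #             if eps[i].priority != eps[j].priority {
+  #                 return eps[i].priority > eps[j].priority
+  #             }
+  #             return eps[i].network < eps[j].network
+  #         })
+  #         fmt.Println("default gateway from:", eps[0].network) // admin_nw
+  #     }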
+ type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
+ type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
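+  # Editor's illustration (not part of the upstream spec): the optimistic
+  # concurrency scheme described for `ObjectVersion` above -- an update only
+  # succeeds if it carries the version it was read at. A minimal Go sketch
+  # with an in-memory stand-in for the API object.
+  #
+  #     package main
+  #
+  #     import (
+  #         "errors"
+  #         "fmt"
+  #     )
+  #
+  #     type object struct {
+  #         Index uint64 // ObjectVersion.Index
+  #         Spec  string
+  #     }
+  #
+  #     func update(o *object, baseIndex uint64, newSpec string) error {
+  #         if o.Index != baseIndex {
+  #             return errors.New("update out of sequence") // version changed since read
+  #         }
+  #         o.Spec = newSpec
+  #         o.Index++
+  #         return nil
+  #     }
+  #
+  #     func main() {
+  #         node := &object{Index: 373531, Spec: "role=worker"}
+  #         fmt.Println(update(node, 373531, "role=manager")) // <nil>
+  #         fmt.Println(update(node, 373531, "role=worker"))  // update out of sequence
+  #     }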
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+
+              A tick currently defaults to one second, so these values translate
+              directly to seconds, but this is not guaranteed.
+            type: "integer"
+            example: 3
+          HeartbeatTick:
+            description: |
+              The number of ticks between heartbeats. Every HeartbeatTick ticks,
+              the leader will send a heartbeat to the followers.
+
+              A tick currently defaults to one second, so these values translate
+              directly to seconds, but this is not guaranteed.
+            type: "integer"
+            example: 1
+      Dispatcher:
+        description: "Dispatcher configuration."
+        type: "object"
+        x-nullable: true
+        properties:
+          HeartbeatPeriod:
+            description: |
+              The delay for an agent to send a heartbeat to the dispatcher.
+            type: "integer"
+            format: "int64"
+            example: 5000000000
+      CAConfig:
+        description: "CA configuration."
+        type: "object"
+        x-nullable: true
+        properties:
+          NodeCertExpiry:
+            description: "The duration node certificates are issued for."
+            type: "integer"
+            format: "int64"
+            example: 7776000000000000
+          ExternalCAs:
+            description: |
+              Configuration for forwarding signing requests to an external
+              certificate authority.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Protocol:
+                  description: |
+                    Protocol for communication with the external CA (currently
+                    only `cfssl` is supported).
+                  type: "string"
+                  enum:
+                    - "cfssl"
+                  default: "cfssl"
+                URL:
+                  description: |
+                    URL where certificate signing requests should be sent.
+                  type: "string"
+                Options:
+                  description: |
+                    An object with key/value pairs that are interpreted as
+                    protocol-specific options for the external CA driver.
+                  type: "object"
+                  additionalProperties:
+                    type: "string"
+                CACert:
+                  description: |
+                    The root CA certificate (in PEM format) this external CA uses
+                    to issue TLS certificates (assumed to be to the current swarm
+                    root CA certificate if not provided).
+                  type: "string"
+          SigningCACert:
+            description: |
+              The desired signing CA certificate for all swarm node TLS leaf
+              certificates, in PEM format.
+            type: "string"
+          SigningCAKey:
+            description: |
+              The desired signing CA key for all swarm node TLS leaf certificates,
+              in PEM format.
+            type: "string"
+          ForceRotate:
+            description: |
+              An integer whose purpose is to force swarm to generate a new
+              signing CA certificate and key, if none have been specified in
+              `SigningCACert` and `SigningCAKey`
+            format: "uint64"
+            type: "integer"
+      EncryptionConfig:
+        description: "Parameters related to encryption-at-rest."
+        type: "object"
+        properties:
+          AutoLockManagers:
+            description: |
+              If set, generate a key and use it to lock data stored on the
+              managers.
+            type: "boolean"
+            example: false
+      TaskDefaults:
+        description: "Defaults for creating tasks in this cluster."
+        type: "object"
+        properties:
+          LogDriver:
+            description: |
+              The log driver to use for tasks created in the orchestrator if
+              unspecified by a service.
+
+              Updating this value only affects new tasks. Existing tasks continue
+              to use their previously configured log driver until recreated.
+            type: "object"
+            properties:
+              Name:
+                description: |
+                  The log driver to use as a default for new tasks.
+                type: "string"
+                example: "json-file"
+              Options:
+                description: |
+                  Driver-specific options for the selected log driver, specified
+                  as key/value pairs.
+                type: "object"
+                additionalProperties:
+                  type: "string"
+                example:
+                  "max-file": "10"
+                  "max-size": "100m"
+
+  # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+  # without `JoinTokens`.
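+
+  # Editor's illustration (not part of the upstream spec): the raft tick
+  # constraint described above (`ElectionTick` must be greater than
+  # `HeartbeatTick`) -- a minimal Go validation sketch; `validateRaft` is a
+  # hypothetical helper.
+  #
+  #     package main
+  #
+  #     import "fmt"
+  #
+  #     func validateRaft(electionTick, heartbeatTick int) error {
+  #         if electionTick <= heartbeatTick {
+  #             return fmt.Errorf("ElectionTick (%d) must be greater than HeartbeatTick (%d)",
+  #                 electionTick, heartbeatTick)
+  #         }
+  #         return nil
+  #     }
+  #
+  #     func main() {
+  #         fmt.Println(validateRaft(3, 1)) // <nil>
+  #         fmt.Println(validateRaft(1, 1)) // error
+  #     }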
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +


+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +


+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +


+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. 
The + secret in the reference will be identified by its ID. + type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive. + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resource limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resource reservations." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Window is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + Label descriptor, such as `engine.labels.az`.
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas per node (default value is 0, which + is unlimited). + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service.
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
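+ # Illustrative sketch (not part of the schema): a minimal request body for
+ # `POST /services/create` that exercises the ServiceSpec, Mode, and
+ # UpdateConfig objects defined in this section. Field names come from the
+ # definitions above; the service name, image, values, and the API version
+ # in the URL are assumptions chosen for demonstration only.
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"Name": "redis-example",
+ #          "TaskTemplate": {"ContainerSpec": {"Image": "redis:7"}},
+ #          "Mode": {"Replicated": {"Replicas": 3}},
+ #          "UpdateConfig": {"Parallelism": 1, "Delay": 1000000000,
+ #                           "FailureAction": "rollback",
+ #                           "Monitor": 15000000000,
+ #                           "MaxFailureRatio": 0.1,
+ #                           "Order": "start-first"}}' \
+ #     "http://localhost/v1.51/services/create"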
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if a rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publishes the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job.
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + infer the exact number of times the job has been executed. + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + x-go-name: "DeleteResponse" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. 
+ type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + + ContainerSummary: + type: "object" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Names: + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). + type: "array" + items: + type: "string" + example: + - "/funny_chatelet" + Image: + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). + type: "string" + example: "docker.io/library/ubuntu:latest" + ImageID: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + x-nullable: true + description: | + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + + This field is not populated in the `GET /system/df` endpoint.
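+ # Illustrative sketch (not part of the schema): ContainerSummary is the
+ # element type of the array returned by `GET /containers/json`. Assuming the
+ # default Unix socket (the API version in the URL is an assumption), a list
+ # request might look like:
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.51/containers/json?all=true"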
+ Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
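+ # Illustrative sketch (not part of the schema): ContainerState is returned
+ # as the `State` field of `GET /containers/{id}/json`. For example, reading
+ # a container's status (jq and the API version are assumptions):
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.51/containers/mycontainer/json" | jq -r '.State.Status'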
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which the previous sample was collected. If the + "one-shot" option is set, this field may be omitted, empty, or set + to a default date (`0001-01-01T00:00:00Z`).
+ + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
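+ # Illustrative sketch (not part of the schema): a single, non-streaming
+ # stats sample, which populates the blkio/cpu/memory objects described in
+ # this section, can be requested with the `stream` and `one-shot` query
+ # parameters (the API version in the URL is an assumption):
+ #
+ #   curl --unix-socket /var/run/docker.sock \
+ #     "http://localhost/v1.51/containers/mycontainer/stats?stream=false&one-shot=true"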
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers.
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container. + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles.
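The `Titles`/`Processes` pairing in `ContainerTopResponse` (whose `Processes` field wraps up just below) lines up by index. A small Go sketch, with an illustrative payload:

```go
// Pair each process's values with the ps column titles.
package main

import (
	"encoding/json"
	"fmt"
)

type TopResponse struct {
	Titles    []string   `json:"Titles"`
	Processes [][]string `json:"Processes"`
}

func main() {
	raw := []byte(`{
		"Titles": ["UID", "PID", "CMD"],
		"Processes": [["root", "13642", "/bin/bash"], ["root", "13735", "sleep 10"]]
	}`)
	var top TopResponse
	if err := json.Unmarshal(raw, &top); err != nil {
		panic(err)
	}
	for _, proc := range top.Processes {
		for i, v := range proc {
			if i < len(top.Titles) { // titles and values correspond by index
				fmt.Printf("%s=%s ", top.Titles[i], v)
			}
		}
		fmt.Println()
	}
}
```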
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version of Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates if IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +
+ + > **Deprecated**: the netfilter module is now loaded on-demand and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. + type: "boolean" + example: false + BridgeNfIp6tables: + description: | + Indicates if `bridge-nf-call-ip6tables` is available on the host. + +
+ + > **Deprecated**: the netfilter module is now loaded on-demand, and no longer + > during daemon startup, making this field obsolete. This field is always + > `false` and will be removed in API v1.49. + type: "boolean" + example: false + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and is automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shut down, + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optionally, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included.
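Much of the `SystemInfo` object above can be inspected with a plain HTTP client. A hedged Go sketch, assuming the default Unix socket at `/var/run/docker.sock` and a daemon that accepts unversioned paths (the `ProductLicense` definition continues just below):

```go
// Query GET /info over the Docker Unix socket and print a few fields.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		// Route all requests through the local daemon socket.
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Get("http://localhost/info") // host is ignored over a Unix socket
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the SystemInfo fields we care about.
	var info struct {
		Name            string
		ServerVersion   string
		Containers      int
		DefaultRuntime  string
		SecurityOptions []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Printf("%s (engine %s): %d containers, default runtime %s\n",
		info.Name, info.ServerVersion, info.Containers, info.DefaultRuntime)
	fmt.Println("security options:", info.SecurityOptions)
}
```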
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where CDI (Container Device Interface) + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that using CDI device injection requires the daemon to have + experimental features enabled. For non-experimental daemons, an empty + list is always returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon. Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `<uid>.<gid>` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration.
+ + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temporary struct holding plugin names + # registered with the docker daemon. It is used by the Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration overrides this behavior, allowing insecure communication + with registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs.
+ type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is secure, in other words, not part of the + list of insecure registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (that is, Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optionally, path of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about the swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of IDs and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type.
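The `EventActor` above and the `EventMessage` defined just below arrive as a stream of JSON objects from `GET /events`. A sketch of consuming that stream, again assuming the default Unix socket path:

```go
// Stream GET /events and print each event as it arrives.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// EventMessage mirrors a subset of the schema below.
type EventMessage struct {
	Type   string `json:"Type"`
	Action string `json:"Action"`
	Actor  struct {
		ID         string            `json:"ID"`
		Attributes map[string]string `json:"Attributes"`
	} `json:"Actor"`
	TimeNano int64 `json:"timeNano"`
}

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
	resp, err := client.Get("http://localhost/events")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is a stream of JSON objects; decode until it closes.
	dec := json.NewDecoder(resp.Body)
	for {
		var ev EventMessage
		if err := dec.Decode(&ev); err != nil {
			return
		}
		fmt.Printf("%d %s %s name=%s\n", ev.TimeNano, ev.Type, ev.Action, ev.Actor.Attributes["name"])
	}
}
```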
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
+ properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. 
+ * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, empty-string group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` The volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly. + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional.
For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum. + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster. + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in-depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This is equal to the sum of [Content] size and all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (for example, image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest.
+ + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the BuildKit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - for example, + the image might still have unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers.
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example: + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} +
DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. 
For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." 
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. 
+ + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. 
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. 
+ type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + - `until=` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
+ + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. 
Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49. 
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. 
+ type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. 
+ type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +
+ + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
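+      # A returned IdentityToken can then be used in place of credentials,
+      # for example (illustrative): base64url-encode
+      #   {"identitytoken":"9cbaf023786cd7..."}
+      # and send it as the X-Registry-Auth header on later requests.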
+ 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + 
- name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. 
+ + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be loaded if the image is + multi-platform. + If not provided, the full multi-platform image will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container.
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
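The exec API is a two-step flow: create an instance against a container, then start it by ID. A minimal Go sketch of that flow (illustrative, not part of the spec); `my-container` and the socket path are assumptions:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"strings"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Step 1: create the exec instance ("my-container" is an example name).
	create, err := client.Post(
		"http://localhost/containers/my-container/exec",
		"application/json",
		strings.NewReader(`{"AttachStdout":true,"AttachStderr":true,"Cmd":["date"]}`),
	)
	if err != nil {
		panic(err)
	}
	var id struct{ Id string }
	json.NewDecoder(create.Body).Decode(&id)
	create.Body.Close()
	fmt.Println("exec id:", id.Id)

	// Step 2: start it. Without a TTY the output stream is multiplexed
	// (8-byte frame headers); a real client would demultiplex it.
	start, err := client.Post(
		"http://localhost/exec/"+id.Id+"/start",
		"application/json",
		strings.NewReader(`{"Detach":false,"Tty":false}`),
	)
	if err != nil {
		panic(err)
	}
	defer start.Body.Close()
	io.Copy(os.Stdout, start.Body)
}
```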
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] +
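The `filters` parameter on list endpoints like this one is always a JSON-encoded `map[string][]string` passed as a query string. A small Go sketch of that encoding (illustrative, not part of the spec; socket path assumed):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Encode the filters as the JSON map[string][]string described above.
	raw, _ := json.Marshal(map[string][]string{"dangling": {"true"}})
	resp, err := client.Get("http://localhost/volumes?filters=" + url.QueryEscape(string(raw)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode just the names from the VolumeListResponse.
	var list struct {
		Volumes []struct{ Name string }
	}
	json.NewDecoder(resp.Body).Decode(&list)
	for _, v := range list.Volumes {
		fmt.Println(v.Name)
	}
}
```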
+ put: + summary: "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for this endpoint is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network.
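The `version` query parameter above implements optimistic concurrency: the client echoes back the version it last read, and the daemon rejects the write if the object changed in between. A Go sketch of that read-then-update pattern (illustrative, not part of the spec; the volume name, the `drain` availability value, and the `AccessMode.Availability` field path are assumptions based on `ClusterVolumeSpec`):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Read the current version from the volume's ClusterVolume field.
	resp, err := client.Get("http://localhost/volumes/my-cluster-volume")
	if err != nil {
		panic(err)
	}
	var v struct {
		ClusterVolume struct {
			Version struct{ Index int64 }
		}
	}
	json.NewDecoder(resp.Body).Decode(&v)
	resp.Body.Close()

	// Send the update, echoing that version back to avoid conflicting writes.
	body := `{"Spec":{"AccessMode":{"Availability":"drain"}}}`
	req, _ := http.NewRequest(http.MethodPut,
		fmt.Sprintf("http://localhost/volumes/my-cluster-volume?version=%d",
			v.ClusterVolume.Version.Index),
		strings.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	if _, err := client.Do(req); err != nil {
		panic(err)
	}
}
```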
For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
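Creating a network and attaching a container to it is a common two-call sequence. A Go sketch of it (illustrative, not part of the spec; `my_network`, `my-container`, and the socket path are example values):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// Create a bridge network ("my_network" is an example name).
	resp, err := client.Post("http://localhost/networks/create", "application/json",
		strings.NewReader(`{"Name":"my_network","Driver":"bridge"}`))
	if err != nil {
		panic(err)
	}
	var created struct{ Id string }
	json.NewDecoder(resp.Body).Decode(&created)
	resp.Body.Close()
	fmt.Println("network id:", created.Id)

	// Attach a running container to it ("my-container" is an example).
	_, err = client.Post("http://localhost/networks/"+created.Id+"/connect",
		"application/json", strings.NewReader(`{"Container":"my-container"}`))
	if err != nil {
		panic(err)
	}
}
```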
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes.
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values.
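+ # --- Editor's note (illustration, not part of the upstream spec) ---
+ # The `Data` field of a `SecretSpec` (see `/secrets/create` above) is
+ # base64url-encoded. A minimal sketch with a hypothetical secret name
+ # (daemon assumed at localhost:2375):
+ #
+ #   import base64, requests
+ #
+ #   payload = {"Name": "db-password",
+ #              "Data": base64.urlsafe_b64encode(b"s3cr3t").decode()}
+ #   resp = requests.post("http://localhost:2375/v1.51/secrets/create",
+ #                        json=payload)
+ #   print(resp.json()["ID"])  # 201 returns the new secret's ID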
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/v1.51.yaml b/_vendor/github.com/moby/moby/docs/api/v1.51.yaml new file mode 100644 index 000000000000..3880635db128 --- /dev/null +++ b/_vendor/github.com/moby/moby/docs/api/v1.51.yaml @@ -0,0 +1,13431 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. 
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.51" +info: + title: "Docker Engine API" + version: "1.51" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.51) is used. + For example, calling `/info` is the same as calling `/v1.51/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as the + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers.
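+ # --- Editor's note (illustration, not part of the upstream spec) ---
+ # Building the `X-Registry-Auth` header described in the Authentication
+ # section above: serialize the credentials as JSON and base64url-encode
+ # them (all values here are placeholders):
+ #
+ #   import base64, json
+ #
+ #   auth = {"username": "jane", "password": "hunter2",
+ #           "email": "jane@example.com",
+ #           "serveraddress": "registry.example.com"}
+ #   header = base64.urlsafe_b64encode(json.dumps(auth).encode()).decode()
+ #   # then send it as: headers={"X-Registry-Auth": header}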
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `image` a docker image + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. 
+ type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `image` Mounts an image. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. 
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "image" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to `true` in conjunction). + + Added in v1.44, before that version all read-only mounts were + non-recursive by default. To match the previous behaviour this + will default to `true` for clients on versions prior to v1.44. + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" + ImageOptions: + description: "Optional configuration for the `image` type." + type: "object" + properties: + Subpath: + description: "Source path inside the image. Must be relative without any back traversals." + type: "string" + example: "dir-inside-image/subdirectory" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount, as an integer." + type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever-increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server.
+ type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." 
+ type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. 
+ type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. 
+ type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
+ This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + example: "" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. + type: "string" + enum: + - "local" + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + description: |- + Driver-specific configuration options for the logging driver. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "5" + "max-size": "10m" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as a `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + Annotations: + type: "object" + description: | + Arbitrary non-identifying metadata attached to container and + provided to the runtime when the container is started. + additionalProperties: + type: "string" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options."
+ items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: |- + Gives the container full access to the host. + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the user namespace mode for the container when the user + namespace remapping option is enabled. + ShmSize: + type: "integer" + format: "int64" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + x-nullable: true + description: |- + A list of kernel parameters (sysctls) to set in the container. + + This field is omitted if not set.
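+ # --- Editor's note (illustration, not part of the upstream spec) ---
+ # A hypothetical HostConfig fragment combining the Tmpfs, Sysctls, and
+ # ShmSize options documented above (64MB = 67108864 bytes):
+ #
+ #   {"Tmpfs": {"/run": "rw,noexec,nosuid,size=65536k"},
+ #    "Sysctls": {"net.ipv4.ip_forward": "1"},
+ #    "ShmSize": 67108864}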
+ additionalProperties: + type: "string" + example: + "net.ipv4.ip_forward": "1" + Runtime: + type: "string" + x-nullable: true + description: |- + Runtime to use with this container. + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + - "" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`<user-name|UID>[<:group-name|GID>]`). + type: "string" + example: "123:456" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created.
+ type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." 
+ type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "User": "web:web" + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. 
+ type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. + type: "string" + example: "" + LinkLocalIPv6PrefixLen: + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. + type: "integer" + example: "" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey is the full path of the netns handle + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + SecondaryIPAddresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + SecondaryIPv6Addresses: + description: "Deprecated: This field is never set and will be removed in a future release." + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for the default "bridge" network.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "172.17.0.1"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address for the default "bridge" network.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "integer"
+ example: 64
+ IPAddress:
+ description: |
+ IPv4 address for the default "bridge" network.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address for this network.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "2001:db8:2::100"
+ MacAddress:
+ description: |
+ MAC address for the container on the default "bridge" network.
+
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0.
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Networks:
+ description: |
+ Information about all networks that the container is connected to.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+
+ Address:
+ description: Address represents an IPv4 or IPv6 IP address.
+ type: "object"
+ properties:
+ Addr:
+ description: IP address.
+ type: "string"
+ PrefixLen:
+ description: Mask length of the IP address.
+ type: "integer"
+
+ PortMap:
+ description: |
+ PortMap describes the mapping of container ports to host ports, using the
+ container's port-number and protocol as key in the format
+ `<port>/<protocol>`, for example, `80/udp`.
+
+ If a container's port is mapped for multiple protocols, separate entries
+ are added to the mapping table.
+ type: "object"
+ additionalProperties:
+ type: "array"
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PortBinding"
+ example:
+ "443/tcp":
+ - HostIp: "127.0.0.1"
+ HostPort: "4443"
+ "80/tcp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ - HostIp: "0.0.0.0"
+ HostPort: "8080"
+ "80/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ "53/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "53"
+ "2377/tcp": null
+
+ PortBinding:
+ description: |
+ PortBinding represents a binding between a host IP address and a host
+ port.
+ type: "object"
+ properties:
+ HostIp:
+ description: "Host IP address that the container's port is mapped to."
+ type: "string"
+ example: "127.0.0.1"
+ HostPort:
+ description: "Host port number that the container's port is mapped to."
+ type: "string"
+ example: "4443"
+
+ DriverData:
+ description: |
+ Information about the storage driver used to store the container's and
+ image's filesystem.
+ type: "object"
+ required: [Name, Data]
+ properties:
+ Name:
+ description: "Name of the storage driver."
+ type: "string"
+ x-nullable: false
+ example: "overlay2"
+ Data:
+ description: |
+ Low-level storage metadata, provided as key/value pairs.
+
+ This information is driver-specific, depends on the storage driver
+ in use, and should be used for informational purposes only.
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example: {
+ "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged",
+ "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff",
+ "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work"
+ }
+
+ FilesystemChange:
+ description: |
+ Change in the container's filesystem.
+ type: "object"
+ required: [Path, Kind]
+ properties:
+ Path:
+ description: |
+ Path to file or directory that has changed.
+ type: "string"
+ x-nullable: false
+ Kind:
+ $ref: "#/definitions/ChangeType"
+
+ ChangeType:
+ description: |
+ Kind of change.
+
+ Can be one of:
+
+ - `0`: Modified ("C")
+ - `1`: Added ("A")
+ - `2`: Deleted ("D")
+ type: "integer"
+ format: "uint8"
+ enum: [0, 1, 2]
+ x-nullable: false
+
+ ImageInspect:
+ description: |
+ Information about an image in the local image cache.
+ type: "object"
+ properties:
+ Id:
+ description: |
+ ID is the content-addressable ID of an image.
+
+ This identifier is a content-addressable digest calculated from the
+ image's configuration (which includes the digests of layers used by
+ the image).
+
+ Note that this digest differs from the `RepoDigests` below, which
+ holds digests of image manifests that reference the image.
+ type: "string"
+ x-nullable: false
+ example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
+ Descriptor:
+ description: |
+ Descriptor is an OCI descriptor of the image target.
+ In case of a multi-platform image, this descriptor points to the OCI index
+ or a manifest list.
+
+ This field is only present if the daemon provides a multi-platform image store.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ x-nullable: true
+ $ref: "#/definitions/OCIDescriptor"
+ Manifests:
+ description: |
+ Manifests is a list of image manifests available in this image. It
+ provides a more detailed view of the platform-specific image manifests or
+ other image-attached data like build attestations.
+
+ Only available if the daemon provides a multi-platform image store
+ and the `manifests` option is set in the inspect request.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
+ RepoTags:
+ description: |
+ List of image names/tags in the local image cache that reference this
+ image.
+
+ Multiple image tags can refer to the same image, and this list may be
+ empty if no tags reference the image, in which case the image is
+ "untagged". An untagged image can still be referenced by its ID.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ - "internal.registry.example.com:5000/example:1.0"
+ RepoDigests:
+ description: |
+ List of content-addressable digests of locally available image manifests
+ that the image is referenced from. Multiple manifests can refer to the
+ same image.
+
+ These digests are usually only available if the image was either pulled
+ from a registry, or if the image was pushed to a registry, which is when
+ the manifest is generated and its digest calculated.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Parent:
+ description: |
+ ID of the parent image.
+
+ Depending on how the image was created, this field may be empty and
+ is only set for images that were built/created locally. This field
+ is empty if the image was pulled from an image registry.
+ type: "string"
+ x-nullable: false
+ example: ""
+ Comment:
+ description: |
+ Optional message that was set when committing or importing the image.
+ type: "string"
+ x-nullable: false
+ example: ""
+ Created:
+ description: |
+ Date and time at which the image was created, formatted in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+
+ This information is only available if present in the image,
+ and omitted otherwise.
+ type: "string"
+ format: "dateTime"
+ x-nullable: true
+ example: "2022-02-04T21:20:12.497794809Z"
+ DockerVersion:
+ description: |
+ The version of Docker that was used to build the image.
+
+ Depending on how the image was created, this field may be empty.
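+ # Illustrative sketch (shortened digests, hypothetical names) of how
+ # `RepoTags` and `RepoDigests` relate for a single image: the tag is a
+ # mutable name, while the digest pins the exact manifest that was pushed
+ # or pulled.
+ #
+ #   "RepoTags":    ["example:1.0"]
+ #   "RepoDigests": ["example@sha256:afcc7f1ac1b4..."]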
+ type: "string" + x-nullable: false + example: "27.0.1" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ImageConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. + type: "integer" + format: "int64" + example: 1239828 + GraphDriver: + $ref: "#/definitions/DriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + + ImageSummary: + type: "object" + x-go-name: "Summary" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+
+ Multiple image tags can refer to the same image, and this list may be
+ empty if no tags reference the image, in which case the image is
+ "untagged". An untagged image can still be referenced by its ID.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ - "internal.registry.example.com:5000/example:1.0"
+ RepoDigests:
+ description: |
+ List of content-addressable digests of locally available image manifests
+ that the image is referenced from. Multiple manifests can refer to the
+ same image.
+
+ These digests are usually only available if the image was either pulled
+ from a registry, or if the image was pushed to a registry, which is when
+ the manifest is generated and its digest calculated.
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Created:
+ description: |
+ Date and time at which the image was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ x-nullable: false
+ example: 1644009612
+ Size:
+ description: |
+ Total size of the image including all layers it is composed of.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 172064416
+ SharedSize:
+ description: |
+ Total size of image layers that are shared between this image and other
+ images.
+
+ This size is not calculated by default. `-1` indicates that the value
+ has not been set or calculated.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 1239828
+ VirtualSize:
+ description: |-
+ Total size of the image including all layers it is composed of.
+
+ Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead.
+ type: "integer"
+ format: "int64"
+ example: 172064416
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Containers:
+ description: |
+ Number of containers using this image. Includes both stopped and running
+ containers.
+
+ `-1` indicates that the value has not been set or calculated.
+ x-nullable: false
+ type: "integer"
+ example: 2
+ Manifests:
+ description: |
+ Manifests is a list of manifests available in this image.
+ It provides a more detailed view of the platform-specific image manifests
+ or other image-attached data like build attestations.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
+ type: "array"
+ x-nullable: false
+ x-omitempty: true
+ items:
+ $ref: "#/definitions/ImageManifestSummary"
+ Descriptor:
+ description: |
+ Descriptor is an OCI descriptor of the image target.
+ In case of a multi-platform image, this descriptor points to the OCI index
+ or a manifest list.
+
+ This field is only present if the daemon provides a multi-platform image store.
+
+ WARNING: This is experimental and may change at any time without any backward
+ compatibility.
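+ # Illustrative arithmetic, using the example values above: the disk space
+ # unique to an image can be derived as `Size - SharedSize`, but only when
+ # `SharedSize` is not the `-1` "not calculated" sentinel.
+ #
+ #   172064416 - 1239828 = 170824588 bytes unique to this image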
+ x-nullable: true + $ref: "#/definitions/OCIDescriptor" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." 
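+ # A hypothetical `POST /volumes/create` request body exercising these
+ # fields (values mirror the examples in this definition):
+ #
+ #   {
+ #     "Name": "tardis",
+ #     "Driver": "local",
+ #     "DriverOpts": {"type": "tmpfs", "device": "tmpfs", "o": "size=100m,uid=1000"},
+ #     "Labels": {"com.example.some-label": "some-value"}
+ #   }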
+ type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + description: | + Name of the network. + type: "string" + example: "my_network" + Id: + description: | + ID that uniquely identifies a network on a single machine. + type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level) + type: "string" + example: "local" + Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). + type: "string" + example: "overlay" + EnableIPv4: + description: | + Whether the network was created with IPv4 enabled. + type: "boolean" + example: true + EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. + type: "boolean" + example: false + IPAM: + $ref: "#/definitions/IPAM" + Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. + type: "boolean" + default: false + example: false + Attachable: + description: | + Whether a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + default: false + example: false + Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + Containers: + description: | + Contains endpoints attached to the network. + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + description: | + Network-specific options uses when creating the network. 
+ type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. + type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + example: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + example: "172.20.0.0/16" + IPRange: + type: "string" + example: "172.20.10.0/24" + Gateway: + type: "string" + example: "172.20.10.11" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + example: "container_1" + EndpointID: + type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: + type: "string" + example: "02:42:ac:13:00:02" + IPv4Address: + type: "string" + example: "172.19.0.2/16" + IPv6Address: + type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. 
+ + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. + example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. 
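+ # Progress for image pulls is streamed as one JSON object per line, each
+ # matching this definition. A hypothetical excerpt of such a stream:
+ #
+ #   {"status":"Downloading","progressDetail":{"current":512,"total":1024},"id":"5f70bf18a086"}
+ #   {"status":"Download complete","progressDetail":{},"id":"5f70bf18a086"}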
+ progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + x-nullable: true + description: |- + errors encountered during the operation. + + + > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + x-nullable: true + description: |- + Progress is a pre-formatted presentation of progressDetail. + + + > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. + progressDetail: + $ref: "#/definitions/ProgressDetail" + + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IDResponse: + description: "Response to an API call that returns just an Id" + type: "object" + x-go-name: "IDResponse" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + GwPriority: + description: | + This property determines which endpoint will provide the default + gateway for a container. The endpoint with the highest priority will + be used. If multiple endpoints have the same priority, endpoints are + lexicographically sorted based on their network name, and the one + that sorts first is picked. + type: "integer" + format: "int64" + example: + - 10 + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. 
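+ # A hypothetical sketch of the configuration half of EndpointSettings as
+ # sent in a `POST /networks/{id}/connect` request body; operational fields
+ # such as NetworkID and EndpointID are returned by the daemon, not supplied:
+ #
+ #   {
+ #     "Container": "3613f73ba0e4",
+ #     "EndpointConfig": {"Aliases": ["server_x"], "GwPriority": 10}
+ #   }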
+ type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
+ type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
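+ # Illustrative flow (IDs and index are hypothetical): ObjectVersion gives
+ # optimistic concurrency, so an update must echo the version from the
+ # previous read:
+ #
+ #   GET  /nodes/24ifsmvkjbyhk                        -> "Version": {"Index": 373531}
+ #   POST /nodes/24ifsmvkjbyhk/update?version=373531  (body: the modified NodeSpec)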
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. 
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
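`GET /swarm` returns the `Swarm` object defined above, that is, `ClusterInfo` plus `JoinTokens`. A sketch of handing the worker token to a second node, again assuming the Python SDK; the `ssh://` URL and the addresses are illustrative:

```python
import docker

manager = docker.from_env()
tokens = manager.swarm.attrs["JoinTokens"]  # Worker and Manager tokens

# On another node (any reachable daemon), join as a worker.
worker = docker.DockerClient(base_url="ssh://user@node-2")
worker.swarm.join(
    remote_addrs=["10.0.0.46:2377"],  # address of a current manager
    join_token=tokens["Worker"],
)
```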
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. 
The + secret in the reference will be identified by its ID. + type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
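The `Secrets` list above is how a service references secrets by ID, with `File` controlling the in-container target. A sketch with the Python SDK (names and values are illustrative; on Linux the file appears under `/run/secrets/`):

```python
import docker
from docker.types import SecretReference

client = docker.from_env()

# POST /secrets/create, then reference the secret from a service spec.
secret = client.secrets.create(name="db_password", data=b"s3cret")
ref = SecretReference(
    secret_id=secret.id,
    secret_name="db_password",  # lookup/display only; the ID is authoritative
    filename="db_password",
    uid="0",
    gid="0",
    mode=0o400,
)
client.services.create("redis:7", name="db", secrets=[ref])
```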
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+
+                    > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+                    > exclusive
+                  type: "object"
+                ConfigID:
+                  description: |
+                    ConfigID represents the ID of the specific config that we're
+                    referencing.
+                  type: "string"
+                ConfigName:
+                  description: |
+                    ConfigName is the name of the config that this references,
+                    but this is just provided for lookup/display purposes. The
+                    config in the reference will be identified by its ID.
+                  type: "string"
+          Isolation:
+            type: "string"
+            description: |
+              Isolation technology of the containers running the service.
+              (Windows only)
+            enum:
+              - "default"
+              - "process"
+              - "hyperv"
+              - ""
+          Init:
+            description: |
+              Run an init inside the container that forwards signals and reaps
+              processes. This field is omitted if empty, and the default (as
+              configured on the daemon) is used.
+            type: "boolean"
+            x-nullable: true
+          Sysctls:
+            description: |
+              Set kernel namespaced parameters (sysctls) in the container.
+              The Sysctls option on services accepts the same sysctls as are
+              supported on containers. Note that while the same sysctls are
+              supported, no guarantees or checks are made about their
+              suitability for a clustered environment, and it's up to the user
+              to determine whether a given sysctl will work properly in a
+              Service.
+            type: "object"
+            additionalProperties:
+              type: "string"
+          # This option is not used by Windows containers
+          CapabilityAdd:
+            type: "array"
+            description: |
+              A list of kernel capabilities to add to the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+              - "CAP_SYS_ADMIN"
+              - "CAP_SYS_CHROOT"
+              - "CAP_SYSLOG"
+          CapabilityDrop:
+            type: "array"
+            description: |
+              A list of kernel capabilities to drop from the default set
+              for the container.
+            items:
+              type: "string"
+            example:
+              - "CAP_NET_RAW"
+          Ulimits:
+            description: |
+              A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Name:
+                  description: "Name of ulimit"
+                  type: "string"
+                Soft:
+                  description: "Soft limit"
+                  type: "integer"
+                Hard:
+                  description: "Hard limit"
+                  type: "integer"
+      NetworkAttachmentSpec:
+        description: |
+          Read-only spec type for non-swarm containers attached to swarm overlay
+          networks.
+
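A sketch of the capability and sysctl fields above, via the Python SDK. Note that `sysctls` on services requires a recent daemon and SDK version, which is an assumption worth verifying locally:

```python
import docker

client = docker.from_env()

# CapabilityAdd/CapabilityDrop and Sysctls in the service's ContainerSpec.
client.services.create(
    "nginx:alpine",
    name="web",
    cap_add=["CAP_NET_BIND_SERVICE"],
    cap_drop=["CAP_NET_RAW"],
    sysctls={"net.core.somaxconn": "1024"},  # no clustering guarantees, per above
)
```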
+
+          > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+          > mutually exclusive. PluginSpec is only used when the Runtime field
+          > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+          > field is set to `attachment`.
+        type: "object"
+        properties:
+          ContainerID:
+            description: "ID of the container represented by this task"
+            type: "string"
+      Resources:
+        description: |
+          Resource requirements which apply to each individual container created
+          as part of the service.
+        type: "object"
+        properties:
+          Limits:
+            description: "Define resource limits."
+            $ref: "#/definitions/Limit"
+          Reservations:
+            description: "Define resource reservations."
+            $ref: "#/definitions/ResourceObject"
+      RestartPolicy:
+        description: |
+          Specification for the restart policy which applies to containers
+          created as part of this service.
+        type: "object"
+        properties:
+          Condition:
+            description: "Condition for restart."
+            type: "string"
+            enum:
+              - "none"
+              - "on-failure"
+              - "any"
+          Delay:
+            description: "Delay between restart attempts."
+            type: "integer"
+            format: "int64"
+          MaxAttempts:
+            description: |
+              Maximum attempts to restart a given container before giving up
+              (default value is 0, which is ignored).
+            type: "integer"
+            format: "int64"
+            default: 0
+          Window:
+            description: |
+              Window is the time window used to evaluate the restart policy
+              (default value is 0, which is unbounded).
+            type: "integer"
+            format: "int64"
+            default: 0
+      Placement:
+        type: "object"
+        properties:
+          Constraints:
+            description: |
+              An array of constraint expressions to limit the set of nodes where
+              a task can be scheduled. Constraint expressions can either use a
+              _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+              nodes that satisfy every expression (AND match). Constraints can
+              match node or Docker Engine labels as follows:
+
+              node attribute       | matches                        | example
+              ---------------------|--------------------------------|-----------------------------------------------
+              `node.id`            | Node ID                        | `node.id==2ivku8v2gvtg4`
+              `node.hostname`      | Node hostname                  | `node.hostname!=node-2`
+              `node.role`          | Node role (`manager`/`worker`) | `node.role==manager`
+              `node.platform.os`   | Node operating system          | `node.platform.os==windows`
+              `node.platform.arch` | Node architecture              | `node.platform.arch==x86_64`
+              `node.labels`        | User-defined node labels       | `node.labels.security==high`
+              `engine.labels`      | Docker Engine's labels         | `engine.labels.operatingsystem==ubuntu-24.04`
+
+              `engine.labels` apply to Docker Engine labels like operating system,
+              drivers, etc. Swarm administrators add `node.labels` for operational
+              purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+            type: "array"
+            items:
+              type: "string"
+            example:
+              - "node.hostname!=node3.corp.example.com"
+              - "node.role!=manager"
+              - "node.labels.type==production"
+              - "node.platform.os==linux"
+              - "node.platform.arch==x86_64"
+          Preferences:
+            description: |
+              Preferences provide a way to make the scheduler aware of factors
+              such as topology. They are provided in order from highest to
+              lowest precedence.
+            type: "array"
+            items:
+              type: "object"
+              properties:
+                Spread:
+                  type: "object"
+                  properties:
+                    SpreadDescriptor:
+                      description: |
+                        label descriptor, such as `engine.labels.az`.
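A sketch of the placement fields above, with constraints AND-ed together, a spread preference, and a per-node replica cap. The Python SDK and the label names are assumptions:

```python
import docker

client = docker.from_env()

client.services.create(
    "redis:7",
    name="cache",
    constraints=[                  # every expression must match (AND)
        "node.role==worker",
        "node.labels.type==production",
    ],
    preferences=[("spread", "node.labels.datacenter")],  # highest precedence first
    maxreplicas=2,                 # MaxReplicas: at most two tasks per node
)
```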
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + $ref: "#/definitions/TaskStatus" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
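`Task` objects are what `GET /tasks` and `GET /tasks/{id}` return; `Status.State` moves through the `TaskState` values listed above, while `DesiredState` is what the orchestrator is converging toward. A sketch of watching a service's tasks with the Python SDK; the service name is illustrative:

```python
import docker

client = docker.from_env()
service = client.services.get("web")

# Server-side filter on the task-list endpoint.
for task in service.tasks(filters={"desired-state": "running"}):
    status = task["Status"]
    print(task["ID"][:12], task.get("NodeID"), status["State"], status.get("Message"))
```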
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
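A sketch tying `ServiceSpec` together: a replicated service with a rolling-update policy and a published port. Durations are nanoseconds, matching the `Delay` and `Monitor` fields above. The Python SDK is assumed and all values are illustrative:

```python
import docker
from docker.types import EndpointSpec, ServiceMode, UpdateConfig

client = docker.from_env()

client.services.create(
    "redis:7",
    name="hopeful_cori",
    mode=ServiceMode("replicated", replicas=3),
    update_config=UpdateConfig(
        parallelism=1,
        delay=1_000_000_000,      # 1s between batches
        failure_action="rollback",
        monitor=15_000_000_000,   # watch each updated task for 15s
        max_failure_ratio=0.15,
        order="start-first",
    ),
    endpoint_spec=EndpointSpec(mode="vip", ports={30001: 6379}),
)
```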
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: |
+              The fraction of tasks that may fail during an update before the
+              failure action is invoked, specified as a floating point number
+              between 0 and 1.
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling out an updated task. Either
+              the old task is shut down before the new task is started, or the
+              new task is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      RollbackConfig:
+        description: "Specification for the rollback strategy of the service."
+        type: "object"
+        properties:
+          Parallelism:
+            description: |
+              Maximum number of tasks to be rolled back in one iteration (0 means
+              unlimited parallelism).
+            type: "integer"
+            format: "int64"
+          Delay:
+            description: |
+              Amount of time between rollback iterations, in nanoseconds.
+            type: "integer"
+            format: "int64"
+          FailureAction:
+            description: |
+              Action to take if a rolled back task fails to run, or stops
+              running during the rollback.
+            type: "string"
+            enum:
+              - "continue"
+              - "pause"
+          Monitor:
+            description: |
+              Amount of time to monitor each rolled back task for failures, in
+              nanoseconds.
+            type: "integer"
+            format: "int64"
+          MaxFailureRatio:
+            description: |
+              The fraction of tasks that may fail during a rollback before the
+              failure action is invoked, specified as a floating point number
+              between 0 and 1.
+            type: "number"
+            default: 0
+          Order:
+            description: |
+              The order of operations when rolling back a task. Either the old
+              task is shut down before the new task is started, or the new task
+              is started before the old task is shut down.
+            type: "string"
+            enum:
+              - "stop-first"
+              - "start-first"
+      Networks:
+        description: |
+          Specifies which networks the service should attach to.
+
+          Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
+        type: "array"
+        items:
+          $ref: "#/definitions/NetworkAttachmentConfig"
+
+      EndpointSpec:
+        $ref: "#/definitions/EndpointSpec"
+
+  EndpointPortConfig:
+    type: "object"
+    properties:
+      Name:
+        type: "string"
+      Protocol:
+        type: "string"
+        enum:
+          - "tcp"
+          - "udp"
+          - "sctp"
+      TargetPort:
+        description: "The port inside the container."
+        type: "integer"
+      PublishedPort:
+        description: "The port on the swarm hosts."
+        type: "integer"
+      PublishMode:
+        description: |
+          The mode in which the port is published.
+
+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. 
Though JobIteration will
+              increase with each subsequent execution, it may not necessarily
+              increase by 1, and so JobIteration should not be used to keep
+              track of the number of times the job has been executed.
+            $ref: "#/definitions/ObjectVersion"
+          LastExecution:
+            description: |
+              The last time, as observed by the server, that this job was
+              started.
+            type: "string"
+            format: "dateTime"
+    example:
+      ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+      Version:
+        Index: 19
+      CreatedAt: "2016-06-07T21:05:51.880065305Z"
+      UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+      Spec:
+        Name: "hopeful_cori"
+        TaskTemplate:
+          ContainerSpec:
+            Image: "redis"
+          Resources:
+            Limits: {}
+            Reservations: {}
+          RestartPolicy:
+            Condition: "any"
+            MaxAttempts: 0
+          Placement: {}
+          ForceUpdate: 0
+        Mode:
+          Replicated:
+            Replicas: 1
+        UpdateConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        RollbackConfig:
+          Parallelism: 1
+          Delay: 1000000000
+          FailureAction: "pause"
+          Monitor: 15000000000
+          MaxFailureRatio: 0.15
+        EndpointSpec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+      Endpoint:
+        Spec:
+          Mode: "vip"
+          Ports:
+            -
+              Protocol: "tcp"
+              TargetPort: 6379
+              PublishedPort: 30001
+        Ports:
+          -
+            Protocol: "tcp"
+            TargetPort: 6379
+            PublishedPort: 30001
+        VirtualIPs:
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.2/16"
+          -
+            NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+            Addr: "10.255.0.3/16"
+
+  ImageDeleteResponseItem:
+    type: "object"
+    x-go-name: "DeleteResponse"
+    properties:
+      Untagged:
+        description: "The image ID of an image that was untagged"
+        type: "string"
+      Deleted:
+        description: "The image ID of an image that was deleted"
+        type: "string"
+
+  ServiceCreateResponse:
+    type: "object"
+    description: |
+      contains the information returned to a client on the
+      creation of a new service.
+    properties:
+      ID:
+        description: "The ID of the created service."
+        type: "string"
+        x-nullable: false
+        example: "ak7w3gjqoa3kuz8xcpnyy0pvl"
+      Warnings:
+        description: |
+          Optional warning message.
+
+          FIXME(thaJeztah): this should have "omitempty" in the generated type.
+        type: "array"
+        x-nullable: true
+        items:
+          type: "string"
+        example:
+          - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+  ServiceUpdateResponse:
+    type: "object"
+    properties:
+      Warnings:
+        description: "Optional warning messages"
+        type: "array"
+        items:
+          type: "string"
+    example:
+      Warnings:
+        - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+  ContainerInspectResponse:
+    type: "object"
+    title: "ContainerInspectResponse"
+    x-go-name: "InspectResponse"
+    properties:
+      Id:
+        description: |-
+          The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes).
+        type: "string"
+        x-go-name: "ID"
+        minLength: 64
+        maxLength: 64
+        pattern: "^[0-9a-fA-F]{64}$"
+        example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+      Created:
+        description: |-
+          Date and time at which the container was created, formatted in
+          [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. 
+        type: "string"
+        example: ""
+      AppArmorProfile:
+        description: |-
+          The AppArmor profile set for the container.
+        type: "string"
+        example: ""
+      ExecIDs:
+        description: |-
+          IDs of exec instances that are running in the container.
+        type: "array"
+        items:
+          type: "string"
+        x-nullable: true
+        example:
+          - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
+          - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
+      HostConfig:
+        $ref: "#/definitions/HostConfig"
+      GraphDriver:
+        $ref: "#/definitions/DriverData"
+      SizeRw:
+        description: |-
+          The size of files that have been created or changed by this container.
+
+          This field is omitted by default, and only set when size is requested
+          in the API request.
+        type: "integer"
+        format: "int64"
+        x-nullable: true
+        example: "122880"
+      SizeRootFs:
+        description: |-
+          The total size of all files in the read-only layers from the image
+          that the container uses. These layers can be shared between containers.
+
+          This field is omitted by default, and only set when size is requested
+          in the API request.
+        type: "integer"
+        format: "int64"
+        x-nullable: true
+        example: "1653948416"
+      Mounts:
+        description: |-
+          List of mounts used by the container.
+        type: "array"
+        items:
+          $ref: "#/definitions/MountPoint"
+      Config:
+        $ref: "#/definitions/ContainerConfig"
+      NetworkSettings:
+        $ref: "#/definitions/NetworkSettings"
+
+  ContainerSummary:
+    type: "object"
+    properties:
+      Id:
+        description: |-
+          The ID of this container as a 256-bit (64-character) hexadecimal string (32 bytes).
+        type: "string"
+        x-go-name: "ID"
+        minLength: 64
+        maxLength: 64
+        pattern: "^[0-9a-fA-F]{64}$"
+        example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf"
+      Names:
+        description: |-
+          The names associated with this container. Most containers have a single
+          name, but when using legacy "links", the container can have multiple
+          names.
+
+          For historic reasons, names are prefixed with a forward-slash (`/`).
+        type: "array"
+        items:
+          type: "string"
+        example:
+          - "/funny_chatelet"
+      Image:
+        description: |-
+          The name or ID of the image used to create the container.
+
+          This field shows the image reference as was specified when creating the container,
+          which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest`
+          or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`),
+          short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`).
+
+          The content of this field can be updated at runtime if the image used to
+          create the container is untagged, in which case the field is updated to
+          contain the image ID (digest) it was resolved to in its canonical,
+          non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`).
+        type: "string"
+        example: "docker.io/library/ubuntu:latest"
+      ImageID:
+        description: |-
+          The ID (digest) of the image that this container was created from.
+        type: "string"
+        example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"
+      ImageManifestDescriptor:
+        $ref: "#/definitions/OCIDescriptor"
+        x-nullable: true
+        description: |
+          OCI descriptor of the platform-specific manifest of the image
+          the container was created from.
+
+          Note: Only available if the daemon provides a multi-platform
+          image store.
+
+          This field is not populated in the `GET /system/df` endpoint.
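`ContainerSummary` is the shape `GET /containers/json` returns and `ContainerInspectResponse` the shape of `GET /containers/{id}/json`; the size fields only appear when requested. A sketch using the Python SDK's low-level client; the container name is illustrative:

```python
import docker

client = docker.from_env()

# List: one ContainerSummary per container; size=True populates SizeRw/SizeRootFs.
for c in client.api.containers(all=True, size=True):
    print(c["Id"][:12], c["Image"], c["State"], c.get("SizeRw"))

# Inspect: the full ContainerInspectResponse for one container.
detail = client.api.inspect_container("funny_chatelet")
print(detail["Path"], detail["Args"], detail["ResolvConfPath"])
```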
+ Command: + description: "Command to run when starting the container" + type: "string" + example: "/bin/bash" + Created: + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). + type: "integer" + format: "int64" + example: "1739811096" + Ports: + description: |- + Port-mappings for the container. + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + State: + description: | + The state of this container. + type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" + Status: + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) + type: "string" + example: "Up 4 days" + HostConfig: + type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. + properties: + NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. + type: "string" + example: "mynetwork" + Annotations: + description: |- + Arbitrary key-value metadata attached to the container. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" + NetworkSettings: + description: |- + Summary of the container's network settings + type: "object" + properties: + Networks: + type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + description: |- + List of mounts used by the container. + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." 
+ type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
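Both `SecretSpec.Data` and `ConfigSpec.Data` must be URL-safe base64 (RFC 4648 section 5) on the wire; SDKs generally encode for you. A sketch of both paths, assuming the Python SDK and illustrative payloads:

```python
import base64

import docker

# What the raw API expects in the Data field:
wire_data = base64.urlsafe_b64encode(b"listen_port=8080\n").decode()

# The SDK takes plain bytes and handles the encoding itself.
client = docker.from_env()
client.configs.create(name="app_config", data=b"listen_port=8080\n")
client.secrets.create(name="api_key", data=b"0123456789abcdef")
```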
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether a process within this container has been killed because it ran + out of memory since the container was last started. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerUpdateResponse: + type: "object" + title: "ContainerUpdateResponse" + x-go-name: "UpdateResponse" + description: |- + Response for a successful container-update. + properties: + Warnings: + type: "array" + description: |- + Warnings encountered when updating the container. + items: + type: "string" + example: ["Published ports are discarded when using host network mode"] + + ContainerStatsResponse: + description: | + Statistics sample for a container. + type: "object" + x-go-name: "StatsResponse" + title: "ContainerStatsResponse" + properties: + name: + description: "Name of the container" + type: "string" + x-nullable: true + example: "boring_wozniak" + id: + description: "ID of the container" + type: "string" + x-nullable: true + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + read: + description: | + Date and time at which this sample was collected. + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:22.165243637Z" + preread: + description: | + Date and time at which this first sample was collected. This field + is not propagated if the "one-shot" option is set. If the "one-shot" + option is set, this field may be omitted, empty, or set to a default + date (`0001-01-01T00:00:00Z`). 
+ + The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + with nano-seconds. + type: "string" + format: "date-time" + example: "2025-01-16T13:55:21.160452595Z" + pids_stats: + $ref: "#/definitions/ContainerPidsStats" + blkio_stats: + $ref: "#/definitions/ContainerBlkioStats" + num_procs: + description: | + The number of processors on the system. + + This field is Windows-specific and always zero for Linux containers. + type: "integer" + format: "uint32" + example: 16 + storage_stats: + $ref: "#/definitions/ContainerStorageStats" + cpu_stats: + $ref: "#/definitions/ContainerCPUStats" + precpu_stats: + $ref: "#/definitions/ContainerCPUStats" + memory_stats: + $ref: "#/definitions/ContainerMemoryStats" + networks: + description: | + Network statistics for the container per interface. + + This field is omitted if the container has no networking enabled. + x-nullable: true + additionalProperties: + $ref: "#/definitions/ContainerNetworkStats" + example: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + + ContainerBlkioStats: + description: | + BlkioStats stores all IO service stats for data read and write. + + This type is Linux-specific and holds many fields that are specific to cgroups v1. + On a cgroup v2 host, all fields other than `io_service_bytes_recursive` + are omitted or `null`. + + This type is only populated on Linux and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStats" + x-nullable: true + properties: + io_service_bytes_recursive: + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_serviced_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_queue_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_service_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_wait_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_merged_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + io_time_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. + x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + sectors_recursive: + description: | + This field is only available when using Linux containers with + cgroups v1. It is omitted or `null` when using cgroups v2. 
+ x-nullable: true + type: "array" + items: + $ref: "#/definitions/ContainerBlkioStatEntry" + example: + io_service_bytes_recursive: [ + {"major": 254, "minor": 0, "op": "read", "value": 7593984}, + {"major": 254, "minor": 0, "op": "write", "value": 100} + ] + io_serviced_recursive: null + io_queue_recursive: null + io_service_time_recursive: null + io_wait_time_recursive: null + io_merged_recursive: null + io_time_recursive: null + sectors_recursive: null + + ContainerBlkioStatEntry: + description: | + Blkio stats entry. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "BlkioStatEntry" + x-nullable: true + properties: + major: + type: "integer" + format: "uint64" + example: 254 + minor: + type: "integer" + format: "uint64" + example: 0 + op: + type: "string" + example: "read" + value: + type: "integer" + format: "uint64" + example: 7593984 + + ContainerCPUStats: + description: | + CPU related info of the container + type: "object" + x-go-name: "CPUStats" + x-nullable: true + properties: + cpu_usage: + $ref: "#/definitions/ContainerCPUUsage" + system_cpu_usage: + description: | + System Usage. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + online_cpus: + description: | + Number of online CPUs. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint32" + x-nullable: true + example: 5 + throttling_data: + $ref: "#/definitions/ContainerThrottlingData" + + ContainerCPUUsage: + description: | + All CPU stats aggregated since container inception. + type: "object" + x-go-name: "CPUUsage" + x-nullable: true + properties: + total_usage: + description: | + Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). + type: "integer" + format: "uint64" + example: 29912000 + percpu_usage: + description: | + Total CPU time (in nanoseconds) consumed per core (Linux). + + This field is Linux-specific when using cgroups v1. It is omitted + when using cgroups v2 and Windows containers. + type: "array" + x-nullable: true + items: + type: "integer" + format: "uint64" + example: 29912000 + + usage_in_kernelmode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 21994000 + usage_in_usermode: + description: | + Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), + or time spent (in 100's of nanoseconds) by all container processes in + kernel mode (Windows). + + Not populated for Windows containers using Hyper-V isolation. + type: "integer" + format: "uint64" + example: 7918000 + + ContainerPidsStats: + description: | + PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "PidsStats" + x-nullable: true + properties: + current: + description: | + Current is the number of PIDs in the cgroup. + type: "integer" + format: "uint64" + x-nullable: true + example: 5 + limit: + description: | + Limit is the hard limit on the number of pids in the cgroup. + A "Limit" of 0 means that there is no limit. 
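`ContainerStatsResponse` carries the current (`cpu_stats`) and previous (`precpu_stats`) CPU samples, which is what makes a CPU percentage computable from a single response; with the `one-shot` option there is no previous sample, so the calculation needs two reads. A sketch of the conventional Linux calculation, assuming the Python SDK and an illustrative container name:

```python
import docker

client = docker.from_env()
s = client.containers.get("funny_chatelet").stats(stream=False)

cpu_delta = (s["cpu_stats"]["cpu_usage"]["total_usage"]
             - s["precpu_stats"]["cpu_usage"]["total_usage"])
sys_delta = (s["cpu_stats"]["system_cpu_usage"]
             - s["precpu_stats"]["system_cpu_usage"])
online = s["cpu_stats"].get("online_cpus") or len(
    s["cpu_stats"]["cpu_usage"].get("percpu_usage") or [])

if sys_delta > 0:
    print("CPU %:", cpu_delta / sys_delta * online * 100.0)
print("mem used/limit:", s["memory_stats"].get("usage"), s["memory_stats"].get("limit"))
```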
+ type: "integer" + format: "uint64" + x-nullable: true + example: "18446744073709551615" + + ContainerThrottlingData: + description: | + CPU throttling stats of the container. + + This type is Linux-specific and omitted for Windows containers. + type: "object" + x-go-name: "ThrottlingData" + x-nullable: true + properties: + periods: + description: | + Number of periods with throttling active. + type: "integer" + format: "uint64" + example: 0 + throttled_periods: + description: | + Number of periods when the container hit its throttling limit. + type: "integer" + format: "uint64" + example: 0 + throttled_time: + description: | + Aggregated time (in nanoseconds) the container was throttled for. + type: "integer" + format: "uint64" + example: 0 + + ContainerMemoryStats: + description: | + Aggregates all memory stats since container inception on Linux. + Windows returns stats for commit and private working set only. + type: "object" + x-go-name: "MemoryStats" + properties: + usage: + description: | + Current `res_counter` usage for memory. + + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + max_usage: + description: | + Maximum usage ever recorded. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + stats: + description: | + All the stats exported via memory.stat. when using cgroups v2. + + This field is Linux-specific and omitted for Windows containers. + type: "object" + additionalProperties: + type: "integer" + format: "uint64" + x-nullable: true + example: + { + "active_anon": 1572864, + "active_file": 5115904, + "anon": 1572864, + "anon_thp": 0, + "file": 7626752, + "file_dirty": 0, + "file_mapped": 2723840, + "file_writeback": 0, + "inactive_anon": 0, + "inactive_file": 2510848, + "kernel_stack": 16384, + "pgactivate": 0, + "pgdeactivate": 0, + "pgfault": 2042, + "pglazyfree": 0, + "pglazyfreed": 0, + "pgmajfault": 45, + "pgrefill": 0, + "pgscan": 0, + "pgsteal": 0, + "shmem": 0, + "slab": 1180928, + "slab_reclaimable": 725576, + "slab_unreclaimable": 455352, + "sock": 0, + "thp_collapse_alloc": 0, + "thp_fault_alloc": 1, + "unevictable": 0, + "workingset_activate": 0, + "workingset_nodereclaim": 0, + "workingset_refault": 0 + } + failcnt: + description: | + Number of times memory usage hits limits. + + This field is Linux-specific and only supported on cgroups v1. + It is omitted when using cgroups v2 and for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + limit: + description: | + This field is Linux-specific and omitted for Windows containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 8217579520 + commitbytes: + description: | + Committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + commitpeakbytes: + description: | + Peak committed bytes. + + This field is Windows-specific and omitted for Linux containers. + type: "integer" + format: "uint64" + x-nullable: true + example: 0 + privateworkingset: + description: | + Private working set. + + This field is Windows-specific and omitted for Linux containers. 
+ type: "integer" + format: "uint64" + x-nullable: true + example: 0 + + ContainerNetworkStats: + description: | + Aggregates the network stats of one container + type: "object" + x-go-name: "NetworkStats" + x-nullable: true + properties: + rx_bytes: + description: | + Bytes received. Windows and Linux. + type: "integer" + format: "uint64" + example: 5338 + rx_packets: + description: | + Packets received. Windows and Linux. + type: "integer" + format: "uint64" + example: 36 + rx_errors: + description: | + Received errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + rx_dropped: + description: | + Incoming packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + tx_bytes: + description: | + Bytes sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 1200 + tx_packets: + description: | + Packets sent. Windows and Linux. + type: "integer" + format: "uint64" + example: 12 + tx_errors: + description: | + Sent errors. Not used on Windows. + + This field is Linux-specific and always zero for Windows containers. + type: "integer" + format: "uint64" + example: 0 + tx_dropped: + description: | + Outgoing packets dropped. Windows and Linux. + type: "integer" + format: "uint64" + example: 0 + endpoint_id: + description: | + Endpoint ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + instance_id: + description: | + Instance ID. Not used on Linux. + + This field is Windows-specific and omitted for Linux containers. + type: "string" + x-nullable: true + + ContainerStorageStats: + description: | + StorageStats is the disk I/O stats for read/write on Windows. + + This type is Windows-specific and omitted for Linux containers. + type: "object" + x-go-name: "StorageStats" + x-nullable: true + properties: + read_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + read_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_count_normalized: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + write_size_bytes: + type: "integer" + format: "uint64" + x-nullable: true + example: 7593984 + + ContainerTopResponse: + type: "object" + x-go-name: "TopResponse" + title: "ContainerTopResponse" + description: |- + Container "top" response. + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + example: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + description: |- + Each process running in the container, where each process + is an array of values corresponding to the titles. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "27.0.1" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "27.0.1" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.47" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.24" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.22.7" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "6.8.0-31-generic" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: | + Indicates if `bridge-nf-call-iptables` is available on the host when + the daemon was started. + +
+ + > **Deprecated**: The netfilter module is now loaded on-demand and no longer + during daemon startup, making this field obsolete. This field is always + `false` and will be removed in API v1.49. + type: "boolean" + example: false + BridgeNfIp6tables: + description: | + Indicates if `bridge-nf-call-ip6tables` is available on the host. + +
+ + > **Deprecated**: The netfilter module is now loaded on-demand, and no longer + during daemon startup, making this field obsolete. This field is always + `false` and will be removed in API v1.49. + type: "boolean" + example: false + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows, this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "6.8.0-31-generic" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" + or "Windows Server 2016 Datacenter". + type: "string" + example: "Ubuntu 24.04 LTS" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "24.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + type: "string" + example: "27.0.1" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and is automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shut down + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + - "" + InitBinary: + description: | + Name and, optionally, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), rootless, and + no-new-privileges. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as the number of nodes and expiration date are included.
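The `SecurityOptions` entries described above use a comma-separated key/value encoding ("name=seccomp,profile=default"). A minimal Go sketch of splitting them apart; the helper name and sample values are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSecurityOption is a hypothetical helper, not part of the API:
// it splits one SecurityOptions entry into its key/value pairs.
func parseSecurityOption(opt string) map[string]string {
	kv := make(map[string]string)
	for _, field := range strings.Split(opt, ",") {
		// Bare keys (no "=") are stored with an empty value.
		k, v, _ := strings.Cut(field, "=")
		kv[k] = v
	}
	return kv
}

func main() {
	for _, opt := range []string{"name=apparmor", "name=seccomp,profile=default"} {
		fmt.Println(parseSecurityOption(opt))
	}
}
```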
+ type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. + type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + x-nullable: true + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" + + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. 
+ + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration overrides this behavior, allowing insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs.
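To illustrate the CIDR matching that `InsecureRegistryCIDRs` above describes, a small Go sketch using the standard `net/netip` package; the sample values mirror the documented defaults, and the matching logic is a simplification of what the daemon actually does:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// e.g. values taken from RegistryServiceConfig.InsecureRegistryCIDRs.
	cidrs := []string{"::1/128", "127.0.0.0/8"}
	// A resolved registry IP address to test against the ranges.
	addr := netip.MustParseAddr("127.0.0.1")

	for _, c := range cidrs {
		prefix := netip.MustParsePrefix(c)
		// Contains reports false for mismatched address families,
		// so the IPv6 range is simply skipped for an IPv4 address.
		if prefix.Contains(addr) {
			fmt.Printf("%s is covered by insecure range %s\n", addr, prefix)
		}
	}
}
```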
+ type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. + type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + + SwarmInfo: + description: | + Represents generic information about the swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of IDs and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type.
+ type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.oci.image.manifest.v1+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null + + OCIPlatform: + type: "object" + x-go-name: Platform + x-nullable: true + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
+ properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. 
+ * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, empty-string group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` The volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly. + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional.
For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum. + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster. + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in-depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This is equal to the sum of the [Content] size and all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (for example, image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest.
+ + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content. For example, + the image might still have unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers.
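As a small illustration of the encoding just described, a Go sketch that builds the `filters` query parameter; the filter names come from the list that follows, and the URL assembly is illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// The filters parameter is a JSON-encoded map[string][]string.
	filters := map[string][]string{
		"status": {"paused"},
		"label":  {"com.example.vendor=Acme"},
	}
	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("filters", string(raw))
	fmt.Println("/containers/json?" + q.Encode())
}
```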
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example: + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} +
DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + database_nw: {} + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerInspectResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerTopResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`.
For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified ("C") + - `1`: Added ("A") + - `2`: Deleted ("D") + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + $ref: "#/definitions/FilesystemChange" + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
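A minimal Go sketch of calling this endpoint and streaming the tarball to disk; the socket path, container name, and output filename are assumptions:

```go
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// GET /containers/{id}/export returns the container filesystem
	// as an application/octet-stream tarball.
	resp, err := client.Get("http://docker/containers/my-container/export")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("my-container.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Stream the response body straight to the file.
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```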
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerStatsResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." 
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + $ref: "#/definitions/ContainerUpdateResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. 
+ + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`.
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. 
+ type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + - `until=` + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
+ + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag, the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time.
Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted, the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting image's layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. + Any other value is taken as a custom network's name or ID to which this + container should connect. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: | + Amount of disk space in bytes to keep for cache + + > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". + > It is kept for backward compatibility and will be removed in API v1.49.
+ type: "integer" + format: "int64" + - name: "reserved-space" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "max-used-space" + in: "query" + description: "Maximum amount of disk space allowed to keep for cache" + type: "integer" + format: "int64" + - name: "min-free-space" + in: "query" + description: "Target amount of free disk space after pruning" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Pull or import an image." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: | + Name of the image to pull. If the name includes a tag or digest, specific behavior applies: + + - If only `fromImage` includes a tag, that tag is used. + - If both `fromImage` and `tag` are provided, `tag` takes precedence. + - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. + - If neither a tag nor digest is specified, all tags are pulled. + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + - name: "manifests" + in: "query" + description: "Include Manifests in the image summary." + type: "boolean" + default: false + required: false + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant. + If omitted, it defaults to any locally available platform, + prioritizing the daemon's host platform. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to show the history for. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. 
+ type: "string" + required: true + - name: "tag" + in: "query" + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. 
+ type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + description: | + Whether this repository has automated builds enabled. + +
+ + > **Deprecated**: This field is deprecated and will always be "false". + type: "boolean" + example: false + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true + is_automated: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true + is_automated: false + name: "busybox" + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true + is_automated: false + name: "postgres" + star_count: 12408 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-official=(true|false)` + - `stars=<number>` Matches images that have at least 'number' stars. + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without a password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..."
+ 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + Api-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + 
- name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). + + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. 
+ + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be loaded if the image is + multi-platform. + If not provided, the full multi-platform image will be loaded. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-<value>` where `<value>` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container.
+ example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + example: false + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: true + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + required: true + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + required: true + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
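The three exec endpoints form a create, start, inspect lifecycle. A sketch using docker-py's low-level `APIClient`, which maps one-to-one onto these routes; the container name is hypothetical:

```python
import docker

api = docker.APIClient(base_url="unix://var/run/docker.sock")

# POST /containers/{id}/exec -- register the command, get an exec ID
exec_id = api.exec_create("my-running-container",
                          cmd=["sh", "-c", "echo $FOO"],
                          environment=["FOO=bar"])["Id"]

# POST /exec/{id}/start -- run it; without detach, output is returned
output = api.exec_start(exec_id)
print(output.decode().strip())  # -> bar

# GET /exec/{id}/json -- ExitCode is only meaningful once Running is false
state = api.exec_inspect(exec_id)
print(state["Running"], state["ExitCode"])
```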
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=<volume-driver-name>` Matches volumes based on their driver. + - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for this request is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network.
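Before moving on to networks: the volume endpoints above (create, inspect, remove, prune) map directly onto docker-py's `volumes` collection. A sketch; the volume name and label are illustrative:

```python
import docker

client = docker.from_env()

# POST /volumes/create
vol = client.volumes.create(name="my-volume", driver="local",
                            labels={"com.example.purpose": "demo"})

# GET /volumes/{name} -- inspect data lives on .attrs
print(vol.attrs["Mountpoint"])

# GET /volumes with a label filter, then DELETE /volumes/{name}
for v in client.volumes.list(filters={"label": "com.example.purpose=demo"}):
    v.remove(force=False)

# POST /volumes/prune -- only anonymous volumes unless all=true is passed
print(client.volumes.prune())
```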
For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv4: true + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv4: false + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=<boolean>` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=<driver-name>` Matches a network's driver. + - `id=<network-id>` Matches all or part of a network ID. + - `label=<key>` or `label=<key>=<value>` of a network label. + - `name=<network-name>` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
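The `filters` parameter is a JSON-encoded map on the wire, but SDKs usually accept a plain dictionary and do the encoding for you. A docker-py sketch of the list call described above:

```python
import docker

client = docker.from_env()

# GET /networks?filters={"driver":["bridge"],"type":["custom"]}
nets = client.networks.list(filters={"driver": "bridge", "type": "custom"})
for net in nets:
    print(net.name, net.id[:12], net.attrs["Scope"])
```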
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "Network created successfully" + schema: + $ref: "#/definitions/NetworkCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + example: "my_network" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + example: true + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. 
The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv4: + description: "Enable IPv4 on the network." + type: "boolean" + example: true + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + example: true + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Operation forbidden" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" + Priority: 100 + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. 
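A typical round trip through the create, connect, and disconnect endpoints, sketched with docker-py; the network name, subnet, and container name are illustrative:

```python
import docker

client = docker.from_env()

# POST /networks/create with a custom IPAM config
ipam = docker.types.IPAMConfig(
    pool_configs=[docker.types.IPAMPool(subnet="172.28.0.0/16")])
net = client.networks.create("app-net", driver="bridge", ipam=ipam)

# POST /networks/{id}/connect -- attach a running container with a fixed IP
net.connect("my-container", ipv4_address="172.28.0.10")

# POST /networks/{id}/disconnect, then DELETE /networks/{id}
net.disconnect("my-container", force=False)
net.remove()
```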
+ tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. 
+ + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `node.label=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
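Node objects carry the version counter that the update endpoint needs; SDKs read it from the inspected object so you don't race other writers. A docker-py sketch (this must run against a manager); the label is illustrative:

```python
import docker

client = docker.from_env()

# GET /nodes with a role filter
for node in client.nodes.list(filters={"role": "manager"}):
    print(node.id[:12], node.attrs["Description"]["Hostname"])

# GET /nodes/{id} then POST /nodes/{id}/update -- docker-py sends the
# current version from the inspected object automatically.
node = client.nodes.list()[0]
spec = node.attrs["Spec"]
spec["Labels"] = {"rack": "a1"}   # hypothetical node label
node.update(spec)
```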
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathAddr: "192.168.1.1" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/ServiceCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + OomScoreAdj: 0 + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + OomScoreAdj: 0 + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IDResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/_vendor/github.com/moby/moby/docs/api/version-history.md b/_vendor/github.com/moby/moby/docs/api/version-history.md index 44e6e7576bd0..4e295e452682 100644 --- a/_vendor/github.com/moby/moby/docs/api/version-history.md +++ b/_vendor/github.com/moby/moby/docs/api/version-history.md @@ -13,6 +13,135 @@ keywords: "API, Docker, rcli, REST, documentation" will be rejected. --> +## v1.51 API changes + +[Docker Engine API v1.51](https://docs.docker.com/reference/api/engine/version/v1.51/) documentation + +* `GET /images/json` now sets the value of `Containers` field for all images + to the count of containers using the image. + This field was previously always -1. + +## v1.50 API changes + +[Docker Engine API v1.50](https://docs.docker.com/reference/api/engine/version/v1.50/) documentation + +* `GET /info` now includes a `DiscoveredDevices` field. 
This is an array of + `DeviceInfo` objects, each providing details about a device discovered by a + device driver. + Currently, only the CDI device driver is supported. +* `DELETE /images/{name}` now supports a `platforms` query parameter. It accepts + an array of JSON-encoded OCI Platform objects, allowing for selecting specific + platforms to delete content for. +* Deprecated: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the + `GET /info` response were deprecated in API v1.48, and are now omitted + in API v1.50. +* Deprecated: `GET /images/{name}/json` no longer returns the following `Config` + fields: `Hostname`, `Domainname`, `AttachStdin`, `AttachStdout`, `AttachStderr`, + `Tty`, `OpenStdin`, `StdinOnce`, `Image`, `NetworkDisabled` (already omitted unless set), + `MacAddress` (already omitted unless set), `StopTimeout` (already omitted unless set). + These additional fields were included in the response due to an implementation + detail but are not part of the image's configuration. These fields were marked + deprecated in API v1.46, and are now omitted. Older versions of the API still + return these fields, but they are always empty. + +## v1.49 API changes + +[Docker Engine API v1.49](https://docs.docker.com/reference/api/engine/version/v1.49/) documentation + +* `GET /images/{name}/json` now supports a `platform` parameter (JSON + encoded OCI Platform type) that allows you to specify which platform of a + multi-platform image to inspect. + This option is mutually exclusive with the `manifests` option. +* `GET /info` now returns a `FirewallBackend` field containing information about + the daemon's firewalling configuration. +* Deprecated: The `AllowNondistributableArtifactsCIDRs` and `AllowNondistributableArtifactsHostnames` + fields in the `RegistryConfig` struct in the `GET /info` response are omitted + in API v1.49. +* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and + `InitCommit.Expected` fields in the `GET /info` endpoint were deprecated + in API v1.48, and are now omitted in API v1.49. + +## v1.48 API changes + +[Docker Engine API v1.48](https://docs.docker.com/reference/api/engine/version/v1.48/) documentation + +* Deprecated: The "error" and "progress" fields in streaming responses for + endpoints that return a JSON progress response, such as `POST /images/create`, + `POST /images/{name}/push`, and `POST /build`, are deprecated. These fields + were marked deprecated in API v1.4 (docker v0.6.0) and API v1.8 (docker v0.7.1) + respectively, but were still returned. These fields will be left empty or will + be omitted in a future API version. Users should use the information in the + `errorDetail` and `progressDetail` fields instead. +* Deprecated: The "allow-nondistributable-artifacts" daemon configuration is + deprecated and enabled by default. The `AllowNondistributableArtifactsCIDRs` + and `AllowNondistributableArtifactsHostnames` fields in the `RegistryConfig` + struct in the `GET /info` response will now always be `null` and will be + omitted in API v1.49. +* Deprecated: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the + `GET /info` response are now always `false` and will be omitted in API + v1.49. The netfilter module is now loaded on demand, and no longer during + daemon startup, making these fields obsolete. +* `GET /images/{name}/history` now supports a `platform` parameter (JSON + encoded OCI Platform type) that allows you to specify the platform to show + the history of, as in the sketch below.
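A minimal sketch of how a client might pass this JSON-encoded `platform` parameter follows. It assumes a daemon that exposes the Engine API over TCP on `localhost:2375` (daemons commonly listen on a Unix socket instead), and the image name and platform values are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// JSON-encoded OCI platform selector; field names follow the OCI image spec.
	platform, err := json.Marshal(map[string]string{
		"os":           "linux",
		"architecture": "arm64",
	})
	if err != nil {
		panic(err)
	}

	// Assumption: the daemon listens on tcp://localhost:2375.
	// "nginx:latest" is a placeholder image name.
	endpoint := "http://localhost:2375/v1.48/images/nginx:latest/history?platform=" +
		url.QueryEscape(string(platform))

	resp, err := http.Get(endpoint)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```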
+* `POST /images/{name}/load` and `GET /images/{name}/get` now support a + `platform` parameter (JSON encoded OCI Platform type) that allows you to + specify the platform to load or save. Not passing this parameter will result + in loading or saving the full multi-platform image. +* `POST /containers/create` now includes a warning in the response when setting + the container-wide `Config.VolumeDriver` option in combination with volumes + defined through `Mounts`, because the `VolumeDriver` option has no effect on + those volumes. This warning was previously generated by the CLI, but has now + moved to the daemon so that other clients can also receive it. +* `POST /containers/create` now supports a `Mount` of type `image` for mounting + an image inside a container. +* Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and + `InitCommit.Expected` fields in the `GET /info` endpoint are deprecated + and will be omitted in API v1.49. +* `Sysctls` in `HostConfig` (top level `--sysctl` settings) for `eth0` are + no longer migrated to `DriverOpts`, as described in the changes for v1.46. +* `GET /images/json` and `GET /images/{name}/json` responses now include a + `Descriptor` field, which contains an OCI descriptor of the image target. + The new field will only be populated if the daemon provides a multi-platform + image store. + WARNING: This is experimental and may change at any time without any backward + compatibility. +* `GET /images/{name}/json` responses will now return a `Manifests` field + containing information about the sub-manifests contained in the image index. + This includes things like platform-specific manifests and build attestations. + The new field will only be populated if the request also sets the `manifests` + query parameter to `true`. + This acts the same as in the `GET /images/json` endpoint. + WARNING: This is experimental and may change at any time without any backward compatibility. +* `GET /containers/{name}/json` now returns an `ImageManifestDescriptor` field + containing the OCI descriptor of the platform-specific image manifest of the + image that was used to create the container. + This field is only populated if the daemon provides a multi-platform image + store. +* `POST /networks/create` now has an `EnableIPv4` field. Setting it to `false` + disables IPv4 IPAM for the network. It can only be set to `false` if the + daemon has experimental features enabled. +* `GET /networks/{id}` now returns an `EnableIPv4` field showing whether the + network has IPv4 IPAM enabled. +* `POST /networks/{id}/connect` and `POST /containers/create` now accept a + `GwPriority` field in `EndpointsConfig`. This value is used to determine which + network endpoint provides the default gateway for the container. The endpoint + with the highest priority is selected. If multiple endpoints have the same + priority, endpoints are sorted lexicographically by their network name, and + the one that sorts first is picked. +* `GET /containers/json` now returns a `GwPriority` field in `NetworkSettings` + for each network endpoint. +* API debug endpoints (`GET /debug/vars`, `GET /debug/pprof/`, `GET /debug/pprof/cmdline`, + `GET /debug/pprof/profile`, `GET /debug/pprof/symbol`, `GET /debug/pprof/trace`, + `GET /debug/pprof/{name}`) are now also accessible through the versioned-API + paths (`/v/`). +* `POST /build/prune` renames `keep-bytes` to `reserved-space` and now supports + the additional prune parameters `max-used-space` and `min-free-space`, as + sketched below.
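As a rough illustration of the renamed and added prune parameters, the sketch below sends them as query parameters, which is how the earlier `keep-*` prune options were passed. It again assumes a daemon reachable over TCP on `localhost:2375`, and the byte thresholds are arbitrary example values.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	// reserved-space is the API v1.48 name for the old parameter.
	q.Set("reserved-space", fmt.Sprint(1<<30))  // keep up to 1 GiB of build cache
	q.Set("max-used-space", fmt.Sprint(5<<30))  // prune once the cache exceeds 5 GiB
	q.Set("min-free-space", fmt.Sprint(10<<30)) // try to keep 10 GiB of disk free

	// Assumption: the daemon listens on tcp://localhost:2375.
	resp, err := http.Post(
		"http://localhost:2375/v1.48/build/prune?"+q.Encode(),
		"application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```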
+* `GET /containers/json` now returns an `ImageManifestDescriptor` field + matching the same field in `/containers/{name}/json`. + This field is only populated if the daemon provides a multi-platform image + store. + ## v1.47 API changes [Docker Engine API v1.47](https://docs.docker.com/reference/api/engine/version/v1.47/) documentation diff --git a/_vendor/modules.txt b/_vendor/modules.txt index 3a19633bb399..3d91ae7b0949 100644 --- a/_vendor/modules.txt +++ b/_vendor/modules.txt @@ -1,6 +1,7 @@ -# github.com/moby/moby v27.5.0+incompatible -# github.com/moby/buildkit v0.19.0 -# github.com/docker/buildx v0.20.0 -# github.com/docker/cli v27.5.0+incompatible -# github.com/docker/compose/v2 v2.32.4 -# github.com/docker/scout-cli v1.15.0 +# github.com/moby/moby v28.3.2+incompatible +# github.com/moby/buildkit v0.23.2 +# github.com/docker/buildx v0.25.0 +# github.com/docker/cli v28.3.3-0.20250711132746-c69d8bde4adc+incompatible +# github.com/docker/compose/v2 v2.38.2 +# github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2 +# github.com/docker/scout-cli v1.18.1 diff --git a/assets/css/code.css b/assets/css/code.css deleted file mode 100644 index fa4bb4bd34b6..000000000000 --- a/assets/css/code.css +++ /dev/null @@ -1,81 +0,0 @@ -@layer components { - .prose { - .highlight, - :not(pre) > code { - font-size: 0.875em; - border: 1px solid; - border-radius: theme("spacing.1"); - background: theme("colors.white"); - border-color: theme("colors.gray.light.300"); - .dark & { - background: theme("colors.gray.dark.200"); - border-color: theme("colors.gray.dark.300"); - } - } - - :not(pre) > code { - background: theme("colors.gray.light.200"); - display: inline-block; - margin: 0; - font-weight: 400; - overflow-wrap: anywhere; - padding: 0 4px; - } - - table:not(.lntable) code { - overflow-wrap: unset; - white-space: nowrap; - } - - /* Indented code blocks */ - pre:not(.chroma) { - @apply my-4 overflow-x-auto p-3; - font-size: 0.875em; - border: 1px solid; - border-radius: theme("spacing.1"); - background: theme("colors.white"); - border-color: theme("colors.gray.light.300"); - .dark & { - background: theme("colors.gray.dark.200"); - border-color: theme("colors.gray.dark.300"); - } - } - - .highlight { - @apply my-4 overflow-x-auto p-3; - - /* LineTableTD */ - .lntd { - vertical-align: top; - padding: 0; - margin: 0; - font-weight: 400; - padding: 0 4px; - &:first-child { - width: 0; - } - } - - /* LineTableTD */ - .lntd { - vertical-align: top; - padding: 0; - margin: 0; - border: 0; - } - /* LineTable */ - .lntable { - display: table; - width: 100%; - border-spacing: 0; - padding: 0; - margin: 0; - border: 0; - /* LineNumberColumnHighlight */ - .lntd:first-child .hl { - display: block; - } - } - } - } -} diff --git a/assets/css/components.css b/assets/css/components.css new file mode 100644 index 000000000000..108134dd29ac --- /dev/null +++ b/assets/css/components.css @@ -0,0 +1,115 @@ +@layer components { + .card { + @apply mt-2 mb-2 flex flex-col gap-2 rounded-sm border border-gray-200 p-3; + @apply dark:border-gray-700 dark:bg-gray-900; + @apply transition-shadow duration-200; + &:hover, + &:focus { + @apply border-gray-300 dark:border-gray-600; + } + } + .card-link:hover { + @apply !no-underline; + } + .card-header { + @apply mb-2 flex items-center gap-2; + @apply text-gray-700 dark:text-gray-100; + } + .card-icon { + @apply text-gray-700 dark:text-gray-100; + } + .card-img, + .card-img svg { + @apply m-0 flex max-h-5 min-h-5 max-w-5 min-w-5 items-center justify-center fill-current; + } 
+ .card-title { + @apply font-semibold; + } + .card-link { + @apply block text-inherit no-underline hover:underline; + } + .card-description { + @apply text-gray-600; + @apply dark:text-gray-300; + } + + .admonition { + @apply relative mb-4 flex w-full flex-col items-start gap-3 rounded-sm px-6 py-4; + @apply bg-gray-50 dark:bg-gray-900; + } + .admonition-header { + @apply flex flex-wrap items-center gap-2; + } + .admonition-title { + @apply font-semibold; + } + .admonition-content { + @apply w-full min-w-0 flex-1 flex-wrap overflow-x-auto break-words; + color: var(--tw-prose-body); + } + .admonition-note { + @apply border-blue-400 bg-blue-50 text-blue-900; + @apply dark:border-blue-600 dark:bg-blue-950 dark:text-blue-100; + } + .admonition-tip { + @apply border-green-400 bg-green-100 text-green-900; + @apply dark:border-green-600 dark:bg-green-950 dark:text-green-100; + } + .admonition-warning { + @apply border-yellow-400 bg-yellow-50 text-yellow-900; + @apply dark:border-yellow-600 dark:bg-yellow-950 dark:text-yellow-100; + } + .admonition-danger { + @apply border-red-400 bg-red-50 text-red-900; + @apply dark:border-red-600 dark:bg-red-950 dark:text-red-100; + } + .admonition-important { + @apply border-purple-400 bg-purple-50 text-purple-900; + @apply dark:border-purple-600 dark:bg-purple-950 dark:text-purple-100; + } + .admonition-icon { + @apply flex-shrink-0; + width: 24px; + height: 24px; + min-width: 24px; + min-height: 24px; + display: flex; + align-items: center; + justify-content: center; + } + + .download-links { + @apply block; + @apply text-gray-800; + @apply dark:text-gray-200; + } + .download-links a { + @apply link; + } + .download-links-subcontainer { + @apply flex flex-wrap gap-2; + } + + .card-image { + @apply h-12 w-12 overflow-hidden; + } +} +.button { + @apply my-2 mr-2 inline-block rounded-sm bg-blue-500 p-1 px-3 text-blue-50 text-white hover:bg-blue-600 dark:bg-blue-500 hover:dark:bg-blue-400; +} + +.summary-bar { + @apply my-1 mt-4 flex flex-col rounded-sm border-1 border-gray-100 bg-gray-50 p-4 dark:border-gray-800 dark:bg-gray-900; +} + +.tabs { + @apply bg-blue/2 rounded-sm p-2; +} +.tablist { + @apply mb-1 border-b border-gray-100 dark:border-gray-800; +} + +.tab-item { + @apply inline-block rounded-t-sm px-3 py-2 hover:bg-gray-100 dark:hover:bg-gray-900; + @apply dark:text-gray-200; +} diff --git a/assets/css/global.css b/assets/css/global.css index fa6742830e81..8ff730389ba8 100644 --- a/assets/css/global.css +++ b/assets/css/global.css @@ -1,89 +1,94 @@ /* global styles */ -@layer base { - [x-cloak=""] { +[x-cloak=""] { + display: none !important; +} +/* alpine cloak for small screens only */ +[x-cloak="sm"] { + @media (width <= 768px) { display: none !important; } - /* alpine cloak for small screens only */ - [x-cloak="sm"] { - @media (width <= 768px) { - display: none !important; - } +} +:root { + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + + scrollbar-color: var(--color-gray-400) rgba(0, 0, 0, 0.05); + &.dark { + scrollbar-color: var(--color-gray-700) rgba(255, 255, 255, 0.1); } +} - :root { - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; +mark { + @apply bg-transparent font-bold text-blue-500 dark:text-blue-800; +} - scrollbar-color: theme(colors.gray.light.400) theme(colors.black / 0.05); - &.dark { - scrollbar-color: theme(colors.gray.dark.800) theme(colors.white / 0.10); - } - } +/* Hide the clear (X) button for search inputs */ +/* Chrome, Safari, Edge, and Opera */ 
+input[type="search"]::-webkit-search-cancel-button { + -webkit-appearance: none; + appearance: none; +} - mark { - @apply bg-transparent font-bold text-blue-light dark:text-blue-dark; - } +/* Firefox */ +input[type="search"]::-moz-search-cancel-button { + display: none; +} - /* Hide the clear (X) button for search inputs */ - /* Chrome, Safari, Edge, and Opera */ - input[type="search"]::-webkit-search-cancel-button { - -webkit-appearance: none; - appearance: none; +/* Internet Explorer and Edge (legacy) */ +input[type="search"]::-ms-clear { + display: none; +} +.prose { + hr { + @apply mt-8 mb-4; } - - /* Firefox */ - input[type="search"]::-moz-search-cancel-button { - display: none; + :where(h1):not(:where([class~="not-prose"], [class~="not-prose"] *)) { + font-weight: 500 !important; + font-size: 180% !important; + margin-bottom: 0.4em !important; } - - /* Internet Explorer and Edge (legacy) */ - input[type="search"]::-ms-clear { - display: none; + > h2 { + @apply mt-7! mb-3!; + font-size: 160% !important; + a { + @apply hover:no-underline!; + } } -} - -/* utility classes */ - -@layer utilities { - .link { - @apply text-blue-light underline underline-offset-2 dark:text-blue-dark; + > h3 { + font-size: 130% !important; + a { + @apply hover:no-underline!; + } } - - .invertible { - @apply dark:hue-rotate-180 dark:invert dark:filter; + > h4 { + a { + @apply hover:no-underline!; + } } - - .bg-pattern-blue { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-blue.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); + > h5 { + a { + @apply hover:no-underline!; } } + ol { + list-style-type: decimal; + } - .bg-pattern-purple { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-purple.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); - } + ol ol { + list-style-type: lower-alpha; } - .bg-pattern-verde { - background-color: theme(colors.white / 50%); - background-image: url('/assets/images/bg-pattern-verde.webp'); - background-blend-mode: overlay; - background-size: cover; - background-repeat: none; - .dark & { - background-color: theme(colors.black / 70%); - } + ol ol ol { + list-style-type: lower-roman; + } +} +.navbar-group:first-of-type { + margin-top: 0.2rem !important; +} + +#search-page-results { + mark:where(.dark, .dark *) { + color: var(--color-blue-400); } } diff --git a/assets/css/icons.css b/assets/css/icons.css deleted file mode 100644 index 08428273b262..000000000000 --- a/assets/css/icons.css +++ /dev/null @@ -1,29 +0,0 @@ -@layer utilities { - .icon-svg { - svg { - font-size: 24px; - width: 1em; - height: 1em; - display: inline-block; - fill: currentColor; - } - } - - .icon-xs { - svg { - font-size: 12px; - } - } - - .icon-sm { - svg { - font-size: 16px; - } - } - - .icon-lg { - svg { - font-size: 32px; - } - } -} diff --git a/assets/css/kapa.css b/assets/css/kapa.css deleted file mode 100644 index 5d9cb0bfb7ad..000000000000 --- a/assets/css/kapa.css +++ /dev/null @@ -1,19 +0,0 @@ -.mantine-Modal-root { - .mantine-Modal-inner { inset: 0; } - - ol { - list-style-type: decimal; - } - - .mantine-List-root { - min-width: 100%; - } - - .mantine-List-itemWrapper { - max-width: 100%; - } - - .mantine-Prism-copy { - background-color: rgb(20, 21, 23) - } -} diff --git a/assets/css/style.css 
b/assets/css/style.css new file mode 100644 index 000000000000..d8469b419a5f --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1,45 @@ +/* Main CSS entry point */ +@import "tailwindcss"; +@plugin "@tailwindcss/typography"; +@source "hugo_stats.json"; + +@font-face { + font-family: "Roboto Flex"; + src: url("/assets/fonts/RobotoFlex.woff2") format("woff2"); + font-weight: 100 1000; /* Range of weights Roboto Flex supports */ + font-stretch: 100%; /* Range of width Roboto Flex supports */ + font-style: oblique 0deg 10deg; /* Range of oblique angle Roboto Flex supports */ + font-display: fallback; +} + +/* Roboto Mono */ +@font-face { + font-family: "Roboto Mono"; + src: url("/assets/fonts/RobotoMono-Regular.woff2") format("woff2"); + font-weight: 100 700; /* Define the range of weight the variable font supports */ + font-style: normal; + font-display: fallback; +} + +/* Roboto Mono Italic */ +@font-face { + font-family: "Roboto Mono"; + src: url("/assets/fonts/RobotoMono-Italic.woff2") format("woff2"); + font-weight: 100 700; /* Define the range of weight the variable font supports */ + font-style: italic; + font-display: fallback; +} + +@layer theme { + @import "theme.css"; +} + +@layer base { + @import "global.css"; +} +@import "utilities.css"; +@import "syntax-dark.css"; +@import "syntax-light.css"; +@import "components.css"; + +@variant dark (&:where(.dark, .dark *)); diff --git a/assets/css/styles.css b/assets/css/styles.css deleted file mode 100644 index b08613215446..000000000000 --- a/assets/css/styles.css +++ /dev/null @@ -1,16 +0,0 @@ -/* see also: tailwind.config.js */ - -@import "tailwindcss/base"; -@import "/assets/css/global"; -@import "/assets/css/typography"; -@import "/assets/css/hack"; - -@import "tailwindcss/components"; -@import "/assets/css/code"; -@import "/assets/css/toc"; -@import "/assets/css/kapa"; - -@import "tailwindcss/utilities"; -@import "/assets/css/syntax-light"; -@import "/assets/css/syntax-dark"; -@import "/assets/css/icons"; diff --git a/assets/css/syntax-dark.css b/assets/css/syntax-dark.css index ff24a1954882..e66c18186f6f 100644 --- a/assets/css/syntax-dark.css +++ b/assets/css/syntax-dark.css @@ -1,343 +1,337 @@ -@layer utilities { - .syntax-dark { - /* Other */ - .x { - color: theme("colors.white"); - } - /* Error */ - .err { - color: theme("colors.red.dark.500"); - } - /* CodeLine */ - .cl { - } - /* LineHighlight */ - .hl { - min-width: fit-content; - background-color: theme("colors.gray.dark.300"); - } - .lntd:first-child .hl, - & > .chroma > code > .hl { - margin-left: -4px; - border-left: 4px solid theme("colors.gray.dark.400"); - } - /* LineNumbersTable */ - .lnt { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.dark.400"); - } - /* LineNumbers */ - .ln { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.dark.400"); - } - /* Line */ - .line { - display: flex; - } - /* Keyword */ - .k { - color: theme("colors.amber.dark.700"); - } - /* KeywordConstant */ - .kc { - color: theme("colors.violet.dark.700"); - } - /* KeywordDeclaration */ - .kd { - color: theme("colors.amber.dark.700"); - } - /* KeywordNamespace */ - .kn { - color: theme("colors.amber.dark.700"); - } - /* KeywordPseudo */ - .kp { - color: theme("colors.amber.dark.700"); - } - /* KeywordReserved */ - .kr { - color: theme("colors.amber.dark.700"); - } - /* KeywordType */ - .kt { - color: theme("colors.amber.dark.700"); - } - /* Name */ - .n { - 
color: theme("colors.violet.dark.700"); - } - /* NameAttribute */ - .na { - color: theme("colors.amber.dark.700"); - } - /* NameBuiltin */ - .nb { - color: theme("colors.amber.dark.700"); - } - /* NameBuiltinPseudo */ - .bp { - color: theme("colors.violet.dark.700"); - } - /* NameClass */ - .nc { - color: theme("colors.white"); - } - /* NameConstant */ - .no { - color: theme("colors.white"); - } - /* NameDecorator */ - .nd { - color: theme("colors.violet.dark.700"); - } - /* NameEntity */ - .ni { - color: theme("colors.amber.dark.700"); - } - /* NameException */ - .ne { - color: theme("colors.red.dark.700"); - } - /* NameFunction */ - .nf { - color: theme("colors.blue.dark.600"); - } - /* NameFunctionMagic */ - .fm { - color: theme("colors.blue.dark.600"); - } - /* NameLabel */ - .nl { - color: theme("colors.amber.dark.500"); - } - /* NameNamespace */ - .nn { - color: theme("colors.white"); - } - /* NameOther */ - .nx { - color: theme("colors.white"); - } - /* NameProperty */ - .py { - color: theme("colors.white"); - } - /* NameTag */ - .nt { - color: theme("colors.green.dark.600"); - } - /* NameVariable */ - .nv { - color: theme("colors.white"); - } - /* NameVariableClass */ - .vc { - color: theme("colors.violet.dark.600"); - } - /* NameVariableGlobal */ - .vg { - color: theme("colors.violet.dark.600"); - } - /* NameVariableInstance */ - .vi { - color: theme("colors.violet.dark.600"); - } - /* NameVariableMagic */ - .vm { - color: theme("colors.violet.dark.600"); - } - /* Literal */ - .l { - color: theme("colors.white"); - } - /* LiteralDate */ - .ld { - color: theme("colors.green.dark.600"); - } - /* LiteralString */ - .s { - color: theme("colors.white"); - } - /* LiteralStringAffix */ - .sa { - color: theme("colors.green.dark.600"); - } - /* LiteralStringBacktick */ - .sb { - color: theme("colors.green.dark.600"); - } - /* LiteralStringChar */ - .sc { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDelimiter */ - .dl { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDoc */ - .sd { - color: theme("colors.green.dark.600"); - } - /* LiteralStringDouble */ - .s2 { - color: theme("colors.green.dark.600"); - } - /* LiteralStringEscape */ - .se { - color: theme("colors.white"); - } - /* LiteralStringHeredoc */ - .sh { - color: theme("colors.green.dark.600"); - } - /* LiteralStringInterpol */ - .si { - color: theme("colors.green.dark.600"); - } - /* LiteralStringOther */ - .sx { - color: theme("colors.green.dark.600"); - } - /* LiteralStringRegex */ - .sr { - color: theme("colors.blue.dark.500"); - } - /* LiteralStringSingle */ - .s1 { - color: theme("colors.green.dark.600"); - } - /* LiteralStringSymbol */ - .ss { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumber */ - .m { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberBin */ - .mb { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberFloat */ - .mf { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberHex */ - .mh { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberInteger */ - .mi { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberIntegerLong */ - .il { - color: theme("colors.blue.dark.600"); - } - /* LiteralNumberOct */ - .mo { - color: theme("colors.blue.dark.600"); - } - /* Operator */ - .o { - color: theme("colors.blue.dark.700"); - } - /* OperatorWord */ - .ow { - color: theme("colors.amber.dark.700"); - } - /* Punctuation */ - .p { - color: theme("colors.gray.dark.500"); - } - /* Comment */ - .c { - color: 
theme("colors.gray.dark.500"); - } - /* CommentHashbang */ - .ch { - color: theme("colors.gray.dark.500"); - } - /* CommentMultiline */ - .cm { - color: theme("colors.gray.dark.500"); - } - /* CommentSingle */ - .c1 { - color: theme("colors.gray.dark.500"); - } - /* CommentSpecial */ - .cs { - color: theme("colors.gray.dark.500"); - } - /* CommentPreproc */ - .cp { - color: theme("colors.gray.dark.500"); - } - /* CommentPreprocFile */ - .cpf { - color: theme("colors.gray.dark.500"); - } - /* Generic */ - .g { - color: theme("colors.white"); - } - /* GenericDeleted */ - .gd { - color: theme("colors.red.dark.500"); - } - /* GenericEmph */ - .ge { - color: theme("colors.white"); - } - /* GenericError */ - .gr { - color: theme("colors.red.dark.500"); - } - /* GenericHeading */ - .gh { - color: theme("colors.gray.dark.600"); - } - /* GenericInserted */ - .gi { - color: theme("colors.green.dark.500"); - } - /* GenericOutput */ - .go { - color: theme("colors.white"); - } - /* GenericPrompt */ - .gp { - user-select: none; - color: theme("colors.green.dark.400"); - } - /* GenericStrong */ - .gs { - color: theme("colors.white"); - } - /* GenericSubheading */ - .gu { - color: theme("colors.gray.dark.600"); - } - /* GenericTraceback */ - .gt { - color: theme("colors.red.dark.500"); - } - /* GenericUnderline */ - .gl { - color: theme("colors.white"); - text-decoration: underline; - } - /* TextWhitespace */ - .w { - color: theme("colors.gray.dark.100"); - } +@utility syntax-dark { + /* Other */ + .x { + color: var(--color-white-main); + } + /* Error */ + .err { + color: var(--color-red-500); + } + /* CodeLine */ + .cl { + color: var(--color-gray-200); + } + /* LineHighlight */ + .hl { + min-width: fit-content; + background-color: var(--color-gray-800); + } + /* LineNumbersTable */ + .lnt { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-300); + } + /* LineNumbers */ + .ln { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-900); + } + /* Line */ + .line { + display: flex; + } + /* Keyword */ + .k { + color: var(--color-yellow-700); + } + /* KeywordConstant */ + .kc { + color: var(--color-violet-300); + } + /* KeywordDeclaration */ + .kd { + color: var(--color-yellow-700); + } + /* KeywordNamespace */ + .kn { + color: var(--color-yellow-700); + } + /* KeywordPseudo */ + .kp { + color: var(--color-yellow-700); + } + /* KeywordReserved */ + .kr { + color: var(--color-yellow-700); + } + /* KeywordType */ + .kt { + color: var(--color-yellow-700); + } + /* Name */ + .n { + color: var(--color-violet-300); + } + /* NameAttribute */ + .na { + color: var(--color-yellow-700); + } + /* NameBuiltin */ + .nb { + color: var(--color-yellow-700); + } + /* NameBuiltinPseudo */ + .bp { + color: var(--color-violet-300); + } + /* NameClass */ + .nc { + color: var(--color-white-main); + } + /* NameConstant */ + .no { + color: var(--color-white-main); + } + /* NameDecorator */ + .nd { + color: var(--color-violet-300); + } + /* NameEntity */ + .ni { + color: var(--color-yellow-700); + } + /* NameException */ + .ne { + color: var(--color-red-700); + } + /* NameFunction */ + .nf { + color: var(--color-blue-400); + } + /* NameFunctionMagic */ + .fm { + color: var(--color-blue-400); + } + /* NameLabel */ + .nl { + color: var(--color-yellow-500); + } + /* NameNamespace */ + .nn { + color: var(--color-white-main); + } + /* NameOther */ + .nx { + color: var(--color-white-main); + } + /* 
NameProperty */ + .py { + color: var(--color-violet-300); + } + /* NameTag */ + .nt { + color: var(--color-green-300); + } + /* NameVariable */ + .nv { + color: var(--color-green-500); + } + /* NameVariableClass */ + .vc { + color: var(--color-violet-600); + } + /* NameVariableGlobal */ + .vg { + color: var(--color-violet-600); + } + /* NameVariableInstance */ + .vi { + color: var(--color-violet-600); + } + /* NameVariableMagic */ + .vm { + color: var(--color-violet-600); + } + /* Literal */ + .l { + color: var(--color-white-main); + } + /* LiteralDate */ + .ld { + color: var(--color-green-600); + } + /* LiteralString */ + .s { + color: var(--color-white-main); + } + /* LiteralStringAffix */ + .sa { + color: var(--color-green-600); + } + /* LiteralStringBacktick */ + .sb { + color: var(--color-green-600); + } + /* LiteralStringChar */ + .sc { + color: var(--color-green-600); + } + /* LiteralStringDelimiter */ + .dl { + color: var(--color-green-600); + } + /* LiteralStringDoc */ + .sd { + color: var(--color-green-600); + } + /* LiteralStringDouble */ + .s2 { + color: var(--color-green-600); + } + /* LiteralStringEscape */ + .se { + color: var(--color-white-main); + } + /* LiteralStringHeredoc */ + .sh { + color: var(--color-green-600); + } + /* LiteralStringInterpol */ + .si { + color: var(--color-green-600); + } + /* LiteralStringOther */ + .sx { + color: var(--color-green-600); + } + /* LiteralStringRegex */ + .sr { + color: var(--color-blue-400); + } + /* LiteralStringSingle */ + .s1 { + color: var(--color-green-600); + } + /* LiteralStringSymbol */ + .ss { + color: var(--color-blue-400); + } + /* LiteralNumber */ + .m { + color: var(--color-blue-400); + } + /* LiteralNumberBin */ + .mb { + color: var(--color-blue-400); + } + /* LiteralNumberFloat */ + .mf { + color: var(--color-blue-400); + } + /* LiteralNumberHex */ + .mh { + color: var(--color-blue-400); + } + /* LiteralNumberInteger */ + .mi { + color: var(--color-blue-400); + } + /* LiteralNumberIntegerLong */ + .il { + color: var(--color-blue-400); + } + /* LiteralNumberOct */ + .mo { + color: var(--color-blue-400); + } + /* Operator */ + .o { + color: var(--color-blue-200); + } + /* OperatorWord */ + .ow { + color: var(--color-yellow-700); + } + /* Punctuation */ + .p { + color: var(--color-gray-500); + } + /* Comment */ + .c { + color: var(--color-gray-500); + } + /* CommentHashbang */ + .ch { + color: var(--color-gray-500); + } + /* CommentMultiline */ + .cm { + color: var(--color-gray-500); + } + /* CommentSingle */ + .c1 { + color: var(--color-gray-500); + } + /* CommentSpecial */ + .cs { + color: var(--color-gray-500); + } + /* CommentPreproc */ + .cp { + color: var(--color-gray-500); + } + /* CommentPreprocFile */ + .cpf { + color: var(--color-gray-500); + } + /* Generic */ + .g { + color: var(--color-white-main); + } + /* GenericDeleted */ + .gd { + color: var(--color-red-500); + } + /* GenericEmph */ + .ge { + color: var(--color-white-main); + } + /* GenericError */ + .gr { + color: var(--color-red-500); + } + /* GenericHeading */ + .gh { + color: var(--color-gray-600); + } + /* GenericInserted */ + .gi { + color: var(--color-green-500); + } + /* GenericOutput */ + .go { + color: var(--color-white-main); + } + /* GenericPrompt */ + .gp { + user-select: none; + color: var(--color-green-500); + } + /* GenericStrong */ + .gs { + color: var(--color-white-main); + } + /* GenericSubheading */ + .gu { + color: var(--color-gray-600); + } + /* GenericTraceback */ + .gt { + color: var(--color-red-500); + } + /* GenericUnderline */ + 
.gl { + color: var(--color-white-main); + text-decoration: underline; + } + /* TextWhitespace */ + .w { + color: var(--color-gray-100); } } diff --git a/assets/css/syntax-light.css b/assets/css/syntax-light.css index ba0bb789f853..e9c3151d14fe 100644 --- a/assets/css/syntax-light.css +++ b/assets/css/syntax-light.css @@ -1,343 +1,337 @@ -@layer utilities { - .syntax-light { - /* Other */ - .x { - color: theme("colors.black"); - } - /* Error */ - .err { - color: theme("colors.red.light.500"); - } - /* CodeLine */ - .cl { - } - /* LineHighlight */ - .hl { - min-width: fit-content; - background-color: theme("colors.blue.light.100"); - } - .lntd:first-child .hl, - & > .chroma > code > .hl { - margin-left: -4px; - border-left: 4px solid theme("colors.blue.light.300"); - } - /* LineNumbersTable */ - .lnt { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.light.400"); - } - /* LineNumbers */ - .ln { - white-space: pre; - user-select: none; - margin-right: 0.4em; - padding: 0 0.4em 0 0.4em; - color: theme("colors.gray.light.400"); - } - /* Line */ - .line { - display: flex; - } - /* Keyword */ - .k { - color: theme("colors.amber.light.500"); - } - /* KeywordConstant */ - .kc { - color: theme("colors.violet.light.400"); - } - /* KeywordDeclaration */ - .kd { - color: theme("colors.amber.light.500"); - } - /* KeywordNamespace */ - .kn { - color: theme("colors.amber.light.500"); - } - /* KeywordPseudo */ - .kp { - color: theme("colors.amber.light.500"); - } - /* KeywordReserved */ - .kr { - color: theme("colors.amber.light.500"); - } - /* KeywordType */ - .kt { - color: theme("colors.amber.light.500"); - } - /* Name */ - .n { - color: theme("colors.violet.light.400"); - } - /* NameAttribute */ - .na { - color: theme("colors.amber.light.500"); - } - /* NameBuiltin */ - .nb { - color: theme("colors.amber.light.500"); - } - /* NameBuiltinPseudo */ - .bp { - color: theme("colors.violet.light.400"); - } - /* NameClass */ - .nc { - color: theme("colors.black"); - } - /* NameConstant */ - .no { - color: theme("colors.black"); - } - /* NameDecorator */ - .nd { - color: theme("colors.violet.light.400"); - } - /* NameEntity */ - .ni { - color: theme("colors.amber.light.500"); - } - /* NameException */ - .ne { - color: theme("colors.red.light.700"); - } - /* NameFunction */ - .nf { - color: theme("colors.blue.light.600"); - } - /* NameFunctionMagic */ - .fm { - color: theme("colors.blue.light.600"); - } - /* NameLabel */ - .nl { - color: theme("colors.amber.light.700"); - } - /* NameNamespace */ - .nn { - color: theme("colors.black"); - } - /* NameOther */ - .nx { - color: theme("colors.black"); - } - /* NameProperty */ - .py { - color: theme("colors.black"); - } - /* NameTag */ - .nt { - color: theme("colors.green.light.600"); - } - /* NameVariable */ - .nv { - color: theme("colors.black"); - } - /* NameVariableClass */ - .vc { - color: theme("colors.violet.light.600"); - } - /* NameVariableGlobal */ - .vg { - color: theme("colors.violet.light.600"); - } - /* NameVariableInstance */ - .vi { - color: theme("colors.violet.light.600"); - } - /* NameVariableMagic */ - .vm { - color: theme("colors.violet.light.600"); - } - /* Literal */ - .l { - color: theme("colors.black"); - } - /* LiteralDate */ - .ld { - color: theme("colors.black"); - } - /* LiteralString */ - .s { - color: theme("colors.black"); - } - /* LiteralStringAffix */ - .sa { - color: theme("colors.green.light.600"); - } - /* LiteralStringBacktick */ - .sb { - color: 
theme("colors.green.light.600"); - } - /* LiteralStringChar */ - .sc { - color: theme("colors.green.light.600"); - } - /* LiteralStringDelimiter */ - .dl { - color: theme("colors.green.light.600"); - } - /* LiteralStringDoc */ - .sd { - color: #8f5902; - } - /* LiteralStringDouble */ - .s2 { - color: theme("colors.green.light.600"); - } - /* LiteralStringEscape */ - .se { - color: theme("colors.black"); - } - /* LiteralStringHeredoc */ - .sh { - color: theme("colors.green.light.600"); - } - /* LiteralStringInterpol */ - .si { - color: theme("colors.green.light.600"); - } - /* LiteralStringOther */ - .sx { - color: theme("colors.green.light.600"); - } - /* LiteralStringRegex */ - .sr { - color: theme("colors.blue.light.500"); - } - /* LiteralStringSingle */ - .s1 { - color: theme("colors.green.light.600"); - } - /* LiteralStringSymbol */ - .ss { - color: theme("colors.green.light.600"); - } - /* LiteralNumber */ - .m { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberBin */ - .mb { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberFloat */ - .mf { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberHex */ - .mh { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberInteger */ - .mi { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberIntegerLong */ - .il { - color: theme("colors.blue.light.600"); - } - /* LiteralNumberOct */ - .mo { - color: theme("colors.blue.light.600"); - } - /* Operator */ - .o { - color: theme("colors.blue.light.400"); - } - /* OperatorWord */ - .ow { - color: theme("colors.amber.light.500"); - } - /* Punctuation */ - .p { - color: theme("colors.gray.light.400"); - } - /* Comment */ - .c { - color: theme("colors.gray.light.400"); - } - /* CommentHashbang */ - .ch { - color: theme("colors.gray.light.400"); - } - /* CommentMultiline */ - .cm { - color: theme("colors.gray.light.400"); - } - /* CommentSingle */ - .c1 { - color: theme("colors.gray.light.400"); - } - /* CommentSpecial */ - .cs { - color: theme("colors.gray.light.400"); - } - /* CommentPreproc */ - .cp { - color: theme("colors.gray.light.400"); - } - /* CommentPreprocFile */ - .cpf { - color: theme("colors.gray.light.400"); - } - /* Generic */ - .g { - color: theme("colors.black"); - } - /* GenericDeleted */ - .gd { - color: theme("colors.red.light.500"); - } - /* GenericEmph */ - .ge { - color: theme("colors.black"); - } - /* GenericError */ - .gr { - color: theme("colors.red.light.500"); - } - /* GenericHeading */ - .gh { - color: theme("colors.gray.light.600"); - } - /* GenericInserted */ - .gi { - color: theme("colors.green.light.500"); - } - /* GenericOutput */ - .go { - color: theme("colors.black"); - } - /* GenericPrompt */ - .gp { - user-select: none; - color: theme("colors.green.light.400"); - } - /* GenericStrong */ - .gs { - color: theme("colors.black"); - } - /* GenericSubheading */ - .gu { - color: theme("colors.gray.light.600"); - } - /* GenericTraceback */ - .gt { - color: theme("colors.red.light.500"); - } - /* GenericUnderline */ - .gl { - color: theme("colors.black"); - text-decoration: underline; - } - /* TextWhitespace */ - .w { - color: theme("colors.gray.light.100"); - } +@utility syntax-light { + /* Other */ + .x { + color: var(--color-black-main); + } + /* Error */ + .err { + color: var(--color-red-500); + } + /* CodeLine */ + .cl { + color: var(--color-gray-700); + } + /* LineHighlight */ + .hl { + min-width: fit-content; + background-color: var(--color-gray-100); + } + /* LineNumbersTable */ + .lnt { + white-space: pre; + 
user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-400); + } + /* LineNumbers */ + .ln { + white-space: pre; + user-select: none; + margin-right: 0.4em; + padding: 0 0.4em 0 0.4em; + color: var(--color-gray-400); + } + /* Line */ + .line { + display: flex; + } + /* Keyword */ + .k { + color: var(--color-yellow-700); + } + /* KeywordConstant */ + .kc { + color: var(--color-violet-400); + } + /* KeywordDeclaration */ + .kd { + color: var(--color-yellow-700); + } + /* KeywordNamespace */ + .kn { + color: var(--color-yellow-700); + } + /* KeywordPseudo */ + .kp { + color: var(--color-yellow-700); + } + /* KeywordReserved */ + .kr { + color: var(--color-yellow-700); + } + /* KeywordType */ + .kt { + color: var(--color-yellow-700); + } + /* Name */ + .n { + color: var(--color-violet-400); + } + /* NameAttribute */ + .na { + color: var(--color-yellow-700); + } + /* NameBuiltin */ + .nb { + color: var(--color-yellow-800); + } + /* NameBuiltinPseudo */ + .bp { + color: var(--color-violet-400); + } + /* NameClass */ + .nc { + color: var(--color-black-main); + } + /* NameConstant */ + .no { + color: var(--color-black-main); + } + /* NameDecorator */ + .nd { + color: var(--color-violet-400); + } + /* NameEntity */ + .ni { + color: var(--color-yellow-700); + } + /* NameException */ + .ne { + color: var(--color-red-700); + } + /* NameFunction */ + .nf { + color: var(--color-blue-500); + } + /* NameFunctionMagic */ + .fm { + color: var(--color-blue-500); + } + /* NameLabel */ + .nl { + color: var(--color-yellow-700); + } + /* NameNamespace */ + .nn { + color: var(--color-black-main); + } + /* NameOther */ + .nx { + color: var(--color-black-main); + } + /* NameProperty */ + .py { + color: var(--color-black-main); + } + /* NameTag */ + .nt { + color: var(--color-blue-400); + } + /* NameVariable */ + .nv { + color: var(--color-black-main); + } + /* NameVariableClass */ + .vc { + color: var(--color-violet-600); + } + /* NameVariableGlobal */ + .vg { + color: var(--color-violet-600); + } + /* NameVariableInstance */ + .vi { + color: var(--color-violet-600); + } + /* NameVariableMagic */ + .vm { + color: var(--color-violet-600); + } + /* Literal */ + .l { + color: var(--color-black-main); + } + /* LiteralDate */ + .ld { + color: var(--color-black-main); + } + /* LiteralString */ + .s { + color: var(--color-black-main); + } + /* LiteralStringAffix */ + .sa { + color: var(--color-green-700); + } + /* LiteralStringBacktick */ + .sb { + color: var(--color-green-700); + } + /* LiteralStringChar */ + .sc { + color: var(--color-green-700); + } + /* LiteralStringDelimiter */ + .dl { + color: var(--color-green-700); + } + /* LiteralStringDoc */ + .sd { + color: #8f5902; + } + /* LiteralStringDouble */ + .s2 { + color: var(--color-green-700); + } + /* LiteralStringEscape */ + .se { + color: var(--color-black-main); + } + /* LiteralStringHeredoc */ + .sh { + color: var(--color-green-700); + } + /* LiteralStringInterpol */ + .si { + color: var(--color-green-700); + } + /* LiteralStringOther */ + .sx { + color: var(--color-green-700); + } + /* LiteralStringRegex */ + .sr { + color: var(--color-blue-500); + } + /* LiteralStringSingle */ + .s1 { + color: var(--color-green-700); + } + /* LiteralStringSymbol */ + .ss { + color: var(--color-green-700); + } + /* LiteralNumber */ + .m { + color: var(--color-blue-500); + } + /* LiteralNumberBin */ + .mb { + color: var(--color-blue-500); + } + /* LiteralNumberFloat */ + .mf { + color: var(--color-blue-500); + } + /* LiteralNumberHex */ + 
.mh { + color: var(--color-blue-500); + } + /* LiteralNumberInteger */ + .mi { + color: var(--color-blue-500); + } + /* LiteralNumberIntegerLong */ + .il { + color: var(--color-blue-500); + } + /* LiteralNumberOct */ + .mo { + color: var(--color-blue-500); + } + /* Operator */ + .o { + color: var(--color-blue-400); + } + /* OperatorWord */ + .ow { + color: var(--color-yellow-700); + } + /* Punctuation */ + .p { + color: var(--color-gray-400); + } + /* Comment */ + .c { + color: var(--color-gray-400); + } + /* CommentHashbang */ + .ch { + color: var(--color-gray-400); + } + /* CommentMultiline */ + .cm { + color: var(--color-gray-400); + } + /* CommentSingle */ + .c1 { + color: var(--color-gray-400); + } + /* CommentSpecial */ + .cs { + color: var(--color-gray-400); + } + /* CommentPreproc */ + .cp { + color: var(--color-gray-400); + } + /* CommentPreprocFile */ + .cpf { + color: var(--color-gray-400); + } + /* Generic */ + .g { + color: var(--color-black-main); + } + /* GenericDeleted */ + .gd { + color: var(--color-red-500); + } + /* GenericEmph */ + .ge { + color: var(--color-black-main); + } + /* GenericError */ + .gr { + color: var(--color-red-500); + } + /* GenericHeading */ + .gh { + color: var(--color-gray-600); + } + /* GenericInserted */ + .gi { + color: var(--color-green-500); + } + /* GenericOutput */ + .go { + color: var(--color-black-main); + } + /* GenericPrompt */ + .gp { + user-select: none; + color: var(--color-green-400); + } + /* GenericStrong */ + .gs { + color: var(--color-black-main); + } + /* GenericSubheading */ + .gu { + color: var(--color-gray-600); + } + /* GenericTraceback */ + .gt { + color: var(--color-red-500); + } + /* GenericUnderline */ + .gl { + color: var(--color-black-main); + text-decoration: underline; + } + /* TextWhitespace */ + .w { + color: var(--color-gray-100); } } diff --git a/assets/css/theme.css b/assets/css/theme.css new file mode 100644 index 000000000000..e6a1603b2239 --- /dev/null +++ b/assets/css/theme.css @@ -0,0 +1,206 @@ +@theme inline { + --font-sans: "roboto flex", sans-serif; + --font-mono: "roboto flex mono", ui-monospace, SFMono-Regular, monospace; + --default-font-family: var(--font-sans); + + --text-xs: 0.7143rem; + --text-xs--letter-spacing: 0.015em; + --text-xs--font-weight: 500; + --text-sm: 0.851rem; + --text-base: 14px; + --text-lg: 1.1429rem; + --text-lg--line-height: 1.75; + --text-xl: 1.2857rem; + --text-xl--letter-spacing: -0.015em; + --text-xl--font-weight: 500; + --text-2xl: 1.5rem; + --text-2xl--letter-spacing: -0.015em; + --text-2xl--font-weight: 500; + --text-3xl: 2rem; + --text-3xl--font-weight: 500; + --text-4xl: 2.5rem; + --text-4xl--letter-spacing: -0.015em; + --text-4xl--font-weight: 500; + + --color-background-light: #f9f9fa; + --color-background-dark: #10151b; + --color-primary-blue: var(--color-blue); + + --color-divider-light: hsla(0, 0%, 0%, 0.1); + --color-divider-dark: hsla(0, 0%, 100%, 0.05); + + --card-bg-dark: #1d262d; + --card-border-dark: #516980; + --card-bg-dark: var(--color-gray-900); + --card-border-dark: var(--color-gray-700); + + --color-navbar-bg: var(--color-background-light); + --color-navbar-bg-dark: var(--color-background-dark); + --color-navbar-text: var(--color-gray-700); + --color-navbar-text-dark: var(--tw-prose-body); + --color-navbar-border-color-light: var(--tw-prose-inverse-body); + --navbar-font-size: 0.92rem; + --navbar-group-font-title-size: 1rem; + --color-navbar-text-dark: var(--color-gray-200); + --color-navbar-group-text-dark: var(--tw-prose-body); + + --color-blue: 
var(--color-blue-400); + --color-blue-100: rgba(217, 229, 252, 1); + --color-blue-200: rgba(170, 196, 248, 1); + --color-blue-300: rgba(123, 164, 244, 1); + --color-blue-400: rgba(75, 131, 241, 1); + --color-blue-50: rgba(246, 248, 254, 1); + --color-blue-500: rgba(37, 96, 255, 1); + --color-blue-600: rgba(13, 77, 242, 1); + --color-blue-700: rgba(0, 61, 181, 1); + --color-blue-800: rgba(0, 41, 120, 1); + --color-blue-900: rgba(0, 29, 86, 1); + --color-blue-950: rgba(0, 21, 60, 1); + --color-blue-focus: rgba(37, 96, 255, 0.24); + --color-blue-focusvisible: rgba(37, 96, 255, 0.32); + --color-blue-hover: rgba(37, 96, 255, 0.12); + --color-blue-outlinedborder: rgba(37, 96, 255, 0.56); + --color-blue-selected: rgba(37, 96, 255, 0.16); + + --color-gray: var(--color-gray-600); + --color-gray-100: rgba(231, 234, 239, 1); + --color-gray-200: rgba(200, 207, 218, 1); + --color-gray-300: rgba(169, 180, 198, 1); + --color-gray-400: rgba(139, 153, 178, 1); + --color-gray-50: rgba(249, 250, 251, 1); + --color-gray-500: rgba(108, 126, 157, 1); + --color-gray-600: rgba(86, 101, 129, 1); + --color-gray-700: rgba(67, 76, 95, 1); + --color-gray-800: rgba(44, 51, 63, 1); + --color-gray-900: rgba(30, 33, 41, 1); + --color-gray-950: rgb(18, 21, 31); + --color-gray-focus: rgba(108, 126, 157, 0.24); + --color-gray-focusvisible: rgba(108, 126, 157, 0.32); + --color-gray-hover: rgba(108, 126, 157, 0.12); + --color-gray-outlinedborder: rgba(108, 126, 157, 0.56); + --color-gray-selected: rgba(108, 126, 157, 0.16); + + --color-green-100: rgba(235, 249, 238, 1); + --color-green-200: rgba(208, 241, 215, 1); + --color-green-300: rgba(169, 229, 189, 1); + --color-green-400: rgba(129, 217, 162, 1); + --color-green-50: rgba(245, 252, 247, 1); + --color-green-500: rgba(90, 206, 140, 1); + --color-green-600: rgba(56, 189, 125, 1); + --color-green-700: rgba(45, 149, 104, 1); + --color-green-800: rgba(33, 110, 75, 1); + --color-green-900: rgba(23, 75, 50, 1); + --color-green-950: rgba(17, 55, 26, 1); + --color-green-focus: rgba(56, 189, 125, 0.24); + --color-green-focusvisible: rgba(56, 189, 125, 0.32); + --color-green-hover: rgba(56, 189, 125, 0.12); + --color-green-outlinedborder: rgba(56, 189, 125, 0.56); + --color-green-selected: rgba(56, 189, 125, 0.16); + + --color-orange-100: rgba(255, 233, 217, 1); + --color-orange-200: rgba(255, 216, 187, 1); + --color-orange-300: rgba(255, 196, 153, 1); + --color-orange-400: rgba(255, 169, 107, 1); + --color-orange-50: rgba(255, 249, 245, 1); + --color-orange-500: rgba(255, 135, 49, 1); + --color-orange-600: rgba(255, 107, 0, 1); + --color-orange-700: rgba(218, 92, 0, 1); + --color-orange-800: rgba(173, 72, 0, 1); + --color-orange-900: rgba(137, 58, 1, 1); + --color-orange-950: rgba(94, 40, 0, 1); + --color-orange-focus: rgba(255, 107, 0, 0.24); + --color-orange-focusvisible: rgba(255, 107, 0, 0.32); + --color-orange-hover: rgba(255, 107, 0, 0.12); + --color-orange-outlinedborder: rgba(255, 107, 0, 0.56); + --color-orange-selected: rgba(255, 107, 0, 0.16); + + --color-pink-100: rgba(255, 230, 251, 1); + --color-pink-200: rgba(255, 201, 246, 1); + --color-pink-300: rgba(255, 166, 240, 1); + --color-pink-400: rgba(252, 113, 220, 1); + --color-pink-50: rgba(255, 247, 254, 1); + --color-pink-500: rgba(237, 73, 199, 1); + --color-pink-600: rgba(201, 24, 171, 1); + --color-pink-700: rgba(171, 0, 137, 1); + --color-pink-800: rgba(131, 0, 105, 1); + --color-pink-900: rgba(109, 0, 81, 1); + --color-pink-950: rgba(85, 0, 51, 1); + --color-pink-focus: rgba(201, 24, 171, 0.24); + 
--color-pink-focusvisible: rgba(201, 24, 171, 0.32); + --color-pink-hover: rgba(201, 24, 171, 0.12); + --color-pink-outlinedborder: rgba(201, 24, 171, 0.56); + --color-pink-selected: rgba(201, 24, 171, 0.16); + + --color-red-100: rgba(255, 223, 223, 1); + --color-red-200: rgba(255, 194, 194, 1); + --color-red-300: rgba(255, 168, 168, 1); + --color-red-400: rgba(255, 117, 117, 1); + --color-red-50: rgba(255, 245, 245, 1); + --color-red-500: rgba(255, 87, 87, 1); + --color-red-600: rgba(244, 47, 57, 1); + --color-red-700: rgba(228, 12, 44, 1); + --color-red-800: rgba(179, 9, 9, 1); + --color-red-900: rgba(137, 0, 0, 1); + --color-red-950: rgba(110, 0, 0, 1); + --color-red-focus: rgba(244, 47, 57, 0.24); + --color-red-focusvisible: rgba(244, 47, 57, 0.32); + --color-red-hover: rgba(244, 47, 57, 0.12); + --color-red-outlinedborder: rgba(244, 47, 57, 0.56); + --color-red-selected: rgba(244, 47, 57, 0.16); + + --color-teal-100: rgba(223, 246, 246, 1); + --color-teal-200: rgba(195, 240, 241, 1); + --color-teal-300: rgba(160, 229, 232, 1); + --color-teal-400: rgba(106, 220, 222, 1); + --color-teal-50: rgba(243, 252, 252, 1); + --color-teal-500: rgba(47, 208, 210, 1); + --color-teal-600: rgba(27, 189, 191, 1); + --color-teal-700: rgba(44, 158, 160, 1); + --color-teal-800: rgba(24, 116, 115, 1); + --color-teal-900: rgba(18, 85, 85, 1); + --color-teal-950: rgba(9, 61, 61, 1); + --color-teal-focus: rgba(27, 189, 191, 0.24); + --color-teal-focusvisible: rgba(27, 189, 191, 0.32); + --color-teal-hover: rgba(27, 189, 191, 0.12); + --color-teal-outlinedborder: rgba(27, 189, 191, 0.56); + --color-teal-selected: rgba(27, 189, 191, 0.16); + + --color-violet: var(--color-violet-500); + --color-violet-100: rgba(239, 224, 255, 1); + --color-violet-200: rgba(211, 183, 255, 1); + --color-violet-300: rgba(174, 130, 255, 1); + --color-violet-400: rgba(152, 96, 255, 1); + --color-violet-50: rgba(252, 249, 255, 1); + --color-violet-500: rgba(125, 46, 255, 1); + --color-violet-600: rgba(109, 0, 235, 1); + --color-violet-700: rgba(87, 0, 187, 1); + --color-violet-800: rgba(69, 0, 147, 1); + --color-violet-900: rgba(55, 0, 118, 1); + --color-violet-950: rgba(37, 0, 80, 1); + --color-violet-focus: rgba(125, 46, 255, 0.24); + --color-violet-focusvisible: rgba(125, 46, 255, 0.32); + --color-violet-hover: rgba(125, 46, 255, 0.12); + --color-violet-outlinedborder: rgba(125, 46, 255, 0.56); + --color-violet-selected: rgba(125, 46, 255, 0.16); + + --color-white-main: rgba(255, 255, 255, 1); + --color-yellow-100: rgba(255, 245, 219, 1); + --color-yellow-200: rgba(255, 241, 204, 1); + --color-yellow-300: rgba(255, 232, 173, 1); + --color-yellow-400: rgba(255, 218, 122, 1); + --color-yellow-50: rgba(255, 251, 240, 1); + --color-yellow-500: rgba(255, 204, 72, 1); + --color-yellow-600: rgba(248, 182, 15, 1); + --color-yellow-700: rgba(235, 156, 0, 1); + --color-yellow-800: rgba(184, 110, 0, 1); + --color-yellow-900: rgba(133, 73, 0, 1); + --color-yellow-950: rgba(100, 55, 0, 1); + --color-yellow-focus: rgba(235, 156, 0, 0.24); + --color-yellow-focusvisible: rgba(235, 156, 0, 0.32); + --color-yellow-hover: rgba(235, 156, 0, 0.12); + --color-yellow-outlinedborder: rgba(235, 156, 0, 0.56); + --color-yellow-selected: rgba(235, 156, 0, 0.16); + + --tw-prose-code-bg: var(--color-gray-100); + --tw-prose-code-bg-dark: var(--color-gray-800); +} diff --git a/assets/css/toc.css b/assets/css/toc.css deleted file mode 100644 index 91ff92d7cd99..000000000000 --- a/assets/css/toc.css +++ /dev/null @@ -1,14 +0,0 @@ -@layer components { - 
#TableOfContents { - .toc a { - @apply block max-w-full truncate py-1 pl-2 hover:font-medium hover:no-underline; - &[aria-current="true"], - &:hover { - @apply border-l-2 border-l-gray-light bg-gradient-to-r from-gray-light-100 font-medium text-black dark:border-l-gray-dark dark:from-gray-dark-200 dark:text-white; - } - &:not([aria-current="true"]) { - @apply text-gray-light-600 hover:text-black dark:text-gray-dark-700 dark:hover:text-white; - } - } - } -} diff --git a/assets/css/typography.css b/assets/css/typography.css deleted file mode 100644 index 008e7af70494..000000000000 --- a/assets/css/typography.css +++ /dev/null @@ -1,77 +0,0 @@ -@layer base { - - /* - * Font faces for Roboto Flex and Roboto Mono. - * - * - https://fonts.google.com/specimen/Roboto+Flex - * - https://fonts.google.com/specimen/Roboto+Mono - * - * The TTF fonts have been compressed to woff2, - * preserving the latin character subset. - * - * */ - - /* Roboto Flex */ - @font-face { - font-family: 'Roboto Flex'; - src: url('/assets/fonts/RobotoFlex.woff2') format('woff2'); - font-weight: 100 1000; /* Range of weights Roboto Flex supports */ - font-stretch: 100%; /* Range of width Roboto Flex supports */ - font-style: oblique 0deg 10deg; /* Range of oblique angle Roboto Flex supports */ - font-display: fallback; - } - - /* Roboto Mono */ - @font-face { - font-family: 'Roboto Mono'; - src: url('/assets/fonts/RobotoMono-Regular.woff2') format('woff2'); - font-weight: 100 700; /* Define the range of weight the variable font supports */ - font-style: normal; - font-display: fallback; - } - - /* Roboto Mono Italic */ - @font-face { - font-family: 'Roboto Mono'; - src: url('/assets/fonts/RobotoMono-Italic.woff2') format('woff2'); - font-weight: 100 700; /* Define the range of weight the variable font supports */ - font-style: italic; - font-display: fallback; - } - - .prose { - li { - @apply my-2; - > :last-child, - > :first-child { - margin: 0; - } - } - a { - font-weight: 400; - } - hr { - @apply mb-4 mt-8; - } - h1 { - @apply my-4 text-4xl; - line-height: 1.167; - } - h2 { - @apply mb-4 mt-8 text-3xl; - line-height: 1.2; - } - h3 { - @apply text-2xl; - line-height: 1.167; - } - h4 { - @apply text-xl; - line-height: 1.235; - } - h5 { - @apply text-lg; - line-height: 1.75; - } - } -} diff --git a/assets/css/utilities.css b/assets/css/utilities.css new file mode 100644 index 000000000000..9b2ec25df0fd --- /dev/null +++ b/assets/css/utilities.css @@ -0,0 +1,266 @@ +@utility icon-xs { + svg { + font-size: 12px; + } +} + +@utility icon-sm { + svg { + font-size: 16px; + } +} + +@utility icon-lg { + svg { + font-size: 32px; + } +} + +@utility text-primary-blue { + color: var(--color-primary-blue); +} + +@utility link { + @apply text-blue no-underline dark:text-blue-400; + font-weight: inherit; + &:hover { + @apply underline underline-offset-3; + } +} + +@utility invertible { + @apply dark:hue-rotate-180 dark:invert dark:filter; +} + +@utility bg-pattern-blue { + background-color: rgba(255, 255, 255, 0.5); + background-image: url("/assets/images/bg-pattern-blue.webp"); + background-blend-mode: overlay; + background-size: cover; + background-repeat: no-repeat; + .dark & { + background-color: rgba(0, 0, 0, 0.741); + } +} + +@utility bg-pattern-purple { + background-color: rgba(255, 255, 255, 0.5); + background-image: url("/assets/images/bg-pattern-purple.webp"); + background-blend-mode: overlay; + background-size: cover; + background-repeat: no-repeat; + .dark & { + background-color: rgba(0, 0, 0, 0.741); + } +} + +@utility 
bg-background-toc { + background-color: var(--color-navbar-bg); + .dark & { + background-color: var(--color-navbar-bg-dark); + } +} + +@utility bg-pattern-verde { + background-color: rgba(255, 255, 255, 0.5); + background-image: url("/assets/images/bg-pattern-verde.webp"); + background-blend-mode: overlay; + background-size: cover; + background-repeat: no-repeat; + .dark & { + background-color: rgba(0, 0, 0, 0.741); + } +} + +@utility icon-svg { + svg { + font-size: 24px; + width: 1em; + height: 1em; + display: inline-block; + fill: currentColor; + } +} +@utility icon-svg-stroke { + svg { + font-size: 24px; + width: 1em; + height: 1em; + display: inline-block; + stroke: currentColor; + } +} + +@utility icon-xs { + svg { + font-size: 12px; + } +} + +@utility icon-sm { + svg { + font-size: 16px; + } +} + +@utility icon-lg { + svg { + font-size: 32px; + } +} + +@utility navbar-font { + font-size: var(--navbar-font-size); + color: var(--color-navbar-text); + .dark & { + color: var(--color-navbar-text-dark); + } +} + +@utility navbar-entry-margin { + @apply px-2 py-1; +} + +@utility navbar-group { + @apply mt-5; +} + +@utility navbar-entry-background-current { + @apply bg-gray-100 dark:bg-gray-900; +} +@utility navbar-group-font-title { + font-size: var(--color-navbar-group-font-title-size); + @apply pb-1.5 font-semibold uppercase; + color: var(--color-navbar-text); + .dark & { + color: var(--color-navbar-text-dark); + } +} + +@utility prose { + .highlight, + :not(pre) > code { + font-size: 0.875em; + border: 1px solid; + border-radius: 0.25rem; /* theme("spacing.1") fallback */ + border: none; + background: transparent; + &::before, + &::after { + content: none !important; + } + } + + :not(pre) > code { + background: var(--tw-prose-code-bg); + .dark & { + background: var(--tw-prose-code-bg-dark); + } + display: inline-block; + margin: 0; + font-weight: 400; + overflow-wrap: anywhere; + padding: 0 4px; + } + + table:not(.lntable) code { + overflow-wrap: unset; + white-space: nowrap; + } + + /* Indented code blocks */ + pre:not(.chroma) { + @apply my-4 overflow-x-auto p-3; + font-size: 0.875em; + border: 1px solid; + border-radius: 0.25rem; /* theme("spacing.1") fallback */ + background: var(--color-white-main); + border-color: var(--color-gray-300); + .dark & { + background: var(--color-gray-200); + border-color: var(--color-gray-400); + } + } + + .highlight { + @apply my-0 overflow-x-auto p-2; + + /* LineTableTD */ + .lntd { + vertical-align: top; + padding: 0; + margin: 0; + font-weight: 400; + padding: 0 4px; + &:first-child { + width: 0; + } + } + + /* LineTableTD */ + .lntd { + vertical-align: top; + padding: 0; + margin: 0; + border: 0; + } + /* LineTable */ + .lntable { + display: table; + width: 100%; + border-spacing: 0; + padding: 0; + margin: 0; + border: 0; + /* LineNumberColumnHighlight */ + .lntd:first-child .hl { + display: block; + } + } + } +} + +@utility section-card { + @apply flex h-full flex-col gap-2 rounded-sm border p-4 drop-shadow-xs hover:drop-shadow-lg; + @apply text-gray dark:text-gray-200; + @apply border-gray-100 bg-gray-50 hover:border-gray-200 dark:border-gray-600 dark:bg-gray-900 hover:dark:border-gray-500; +} + +@utility section-card-text { + @apply leading-snug text-gray-800 dark:text-gray-200; +} +@utility section-card-title { + @apply text-xl font-semibold text-gray-900 dark:text-gray-100; +} + +@utility sub-button { + @apply flex w-full items-center gap-2 rounded-sm px-2 py-2 text-left text-gray-600 transition-colors hover:bg-gray-50 dark:text-gray-100 
dark:hover:bg-gray-800; +} + +@utility dropdown-base { + @apply rounded-sm border border-gray-300 bg-white text-gray-600 dark:border-gray-300 dark:bg-gray-900 dark:text-gray-100; +} + +@utility toc { + a { + @apply block max-w-full truncate py-1 pl-2 hover:font-medium hover:no-underline; + &[aria-current="true"], + &:hover { + @apply border-l-2 border-x-gray-200 bg-gradient-to-r from-gray-50 font-medium text-black dark:border-l-gray-300 dark:from-gray-900 dark:text-white; + } + &:not([aria-current="true"]) { + @apply text-gray-600 hover:text-black dark:text-gray-100 dark:hover:text-white; + } + } +} +@utility chip { + @apply border-divider-light dark:border-divider-dark inline-flex items-center gap-1 rounded-full border bg-gray-100 px-2 text-sm text-gray-800 select-none dark:bg-gray-700 dark:text-gray-200; +} + +@utility pagination-link { + @apply flex items-center justify-center rounded-sm p-2; +} + +@utility breadcrumbs { + font-size: 90%; +} diff --git a/assets/icons/AppleMac.svg b/assets/icons/AppleMac.svg new file mode 100644 index 000000000000..b218d8cdcafd --- /dev/null +++ b/assets/icons/AppleMac.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/static/assets/icons/Compose.svg b/assets/icons/Compose.svg similarity index 100% rename from static/assets/icons/Compose.svg rename to assets/icons/Compose.svg diff --git a/assets/icons/Linux.svg b/assets/icons/Linux.svg new file mode 100644 index 000000000000..55554f63b637 --- /dev/null +++ b/assets/icons/Linux.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/static/assets/icons/Scout.svg b/assets/icons/Scout.svg similarity index 100% rename from static/assets/icons/Scout.svg rename to assets/icons/Scout.svg diff --git a/assets/icons/Testcontainers.svg b/assets/icons/Testcontainers.svg new file mode 100644 index 000000000000..5d2d59fece26 --- /dev/null +++ b/assets/icons/Testcontainers.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/static/assets/icons/Whale.svg b/assets/icons/Whale.svg similarity index 100% rename from static/assets/icons/Whale.svg rename to assets/icons/Whale.svg diff --git a/assets/icons/Windows.svg b/assets/icons/Windows.svg new file mode 100644 index 000000000000..7244da36d971 --- /dev/null +++ b/assets/icons/Windows.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/icons/claude.svg b/assets/icons/claude.svg new file mode 100644 index 000000000000..e29f32825727 --- /dev/null +++ b/assets/icons/claude.svg @@ -0,0 +1 @@ +Claude \ No newline at end of file diff --git a/assets/icons/dhi.svg b/assets/icons/dhi.svg new file mode 100644 index 000000000000..1e716e8fdf73 --- /dev/null +++ b/assets/icons/dhi.svg @@ -0,0 +1,13 @@ + \ No newline at end of file diff --git a/assets/icons/go.svg b/assets/icons/go.svg new file mode 100644 index 000000000000..bfcca48ceda7 --- /dev/null +++ b/assets/icons/go.svg @@ -0,0 +1,10 @@ + + + + + + + \ No newline at end of file diff --git a/assets/icons/java.svg b/assets/icons/java.svg new file mode 100644 index 000000000000..590da12d96a6 --- /dev/null +++ b/assets/icons/java.svg @@ -0,0 +1,17 @@ + + + + + + + + + \ No newline at end of file diff --git a/static/assets/images/logo-build-cloud.svg b/assets/icons/logo-build-cloud.svg similarity index 100% rename from static/assets/images/logo-build-cloud.svg rename to assets/icons/logo-build-cloud.svg diff --git a/assets/icons/openai.svg b/assets/icons/openai.svg new file mode 100644 index 000000000000..50d94d6c1085 --- /dev/null +++ b/assets/icons/openai.svg @@ -0,0 +1 @@ +OpenAI \ No newline at end of file diff --git 
a/assets/icons/toolkit.svg b/assets/icons/toolkit.svg new file mode 100644 index 000000000000..ef4c016dc5c0 --- /dev/null +++ b/assets/icons/toolkit.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/assets/theme/icons/edit.svg b/assets/theme/icons/edit.svg new file mode 100644 index 000000000000..2ee5ec5d2be3 --- /dev/null +++ b/assets/theme/icons/edit.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/assets/theme/icons/issue.svg b/assets/theme/icons/issue.svg new file mode 100644 index 000000000000..eef2863fdf56 --- /dev/null +++ b/assets/theme/icons/issue.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/content/_index.md b/content/_index.md index 5561dc158f35..a577e65a04c0 100644 --- a/content/_index.md +++ b/content/_index.md @@ -14,6 +14,17 @@ grid: url: "/desktop/use-desktop/" - text: "Release notes" url: "/desktop/release-notes/" + - title: Docker Hardened Images + icon: /icons/dhi.svg + description: | + Secure, minimal images for trusted software delivery. + links: + - text: "Overview" + url: "/dhi/" + - text: "Quickstart" + url: "/dhi/get-started/" + - text: "Use an image" + url: "/dhi/how-to/use/" - title: Docker Engine icon: developer_board description: | @@ -45,8 +56,8 @@ grid: url: "/build-cloud/" - text: "Setup" url: "/build-cloud/setup/" - - text: "Optimization" - url: "/build-cloud/optimization/" + - text: "Release notes" + url: "/build-cloud/release-notes/" - title: Docker Compose icon: polyline description: | @@ -135,4 +146,15 @@ grid: url: "https://testcontainers.com/cloud/docs/#getting-started" - text: "TCC for CI" url: "https://testcontainers.com/cloud/docs/#tcc-for-ci" + - title: Docker Offload + icon: cloud + description: | + Build and run containers in the cloud. + links: + - text: "Overview" + url: "/offload/" + - text: "Quickstart" + url: "/offload/quickstart/" + - text: "About Docker Offload" + url: "/offload/about/" --- diff --git a/content/contribute/components/call-outs.md b/content/contribute/components/call-outs.md index 24455b7837f9..231e896d2b2e 100644 --- a/content/contribute/components/call-outs.md +++ b/content/contribute/components/call-outs.md @@ -6,18 +6,25 @@ toc_max: 3 We support these broad categories of callouts: -- Alerts (Note, Tip, Important, Warning, Caution) -- Version callouts -- Experimental, which use the `{{%/* experimental */%}}` shortcode -- Restricted, which use the `{{%/* restricted */%}}` shortcode +- Alerts: Note, Tip, Important, Warning, Caution -The experimental and restricted shortcodes take a title as an argument. The -title is optional, defaults to "Experimental" or "Restricted" respectively, and -is displayed in the callout. +We also support summary bars, which represent a feature's required subscription, version, or Administrator role. +To add a summary bar: + +Add the feature name to the `/data/summary.yaml` file.
Use the following attributes: + +| Attribute | Description | Possible values | +|----------------|--------------------------------------------------------|---------------------------------------------------------| +| `subscription` | Notes the subscription required to use the feature | All, Personal, Pro, Team, Business | +| `availability` | Notes what product development stage the feature is in | Experimental, Beta, Early Access, GA, Retired | +| `requires` | Notes what minimum version is required for the feature | No specific value, use a string to describe the version and link to relevant release notes | +| `for` | Notes if the feature is intended for IT Administrators | Administrators | + +Then, add the `summary-bar` shortcode on the page you want to add the summary bar to. Note that the feature name is case-sensitive. The icons that appear in the summary bar are automatically rendered. ## Examples -{{< introduced buildx 0.16.0 >}} +{{< summary-bar feature_name="PKG installer" >}} > [!NOTE] > @@ -54,18 +61,10 @@ is displayed in the callout. For both of the following callouts, consult [the Docker release lifecycle](/release-lifecycle) for more information on when to use them. -{{% experimental title="Beta feature" %}} -The Builds view is currently in Beta. This feature may change or be removed from future releases. -{{% /experimental %}} - -{{% restricted %}} -Docker Scout is an [early access](/release-lifecycle/#early-access-ea) product. -{{% /restricted %}} - -## Formatting +## Formatting -```go -{{}} +```md +{{}} ``` ```html @@ -101,14 +100,4 @@ Docker Scout is an [early access](/release-lifecycle/#early-access-ea) product. > [!CAUTION] > > Here be dragons. -``` - -```go -{{%/* experimental title="Beta feature" */%}} -The Builds view is currently in Beta. This feature may change or be removed from future releases. -{{%/* /experimental */%}} - -{{%/* restricted */%}} -Docker Scout is an [early access](/release-lifecycle/#early-access-ea) product. -{{%/* /restricted */%}} -``` +``` \ No newline at end of file diff --git a/content/contribute/components/code-blocks.md b/content/contribute/components/code-blocks.md index f5611805ca5c..63f7b1ab3e31 100644 --- a/content/contribute/components/code-blocks.md +++ b/content/contribute/components/code-blocks.md @@ -13,7 +13,7 @@ we use often. If your example contains a placeholder value that's subject to change, use the format `<[A-Z_]+>` for the placeholder value: `` -```none +```text export name= ``` @@ -98,7 +98,7 @@ Use the `bash` language code block when you want to show a Bash script: ```bash #!/usr/bin/bash -echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list +echo "deb https://download.docker.com/linux/ubuntu noble stable" | sudo tee /etc/apt/sources.list.d/docker.list ``` If you want to show an interactive shell, use `console` instead.
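Putting these two conventions together, a `console` block that uses an angle-bracket placeholder might look like the following sketch (illustrative only, not taken from this changeset; `<YOUR_USERNAME>` is a hypothetical placeholder):

```console
$ docker login -u <YOUR_USERNAME>
```

The `$` prompt signals an interactive shell rather than a script, and the `<[A-Z_]+>`-style placeholder tells readers which part of the command to replace with their own value.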
@@ -106,7 +106,7 @@ In cases where you use `console`, make sure to add a dollar character for the user sign: ```console -$ echo "deb https://packages.docker.com/1.12/apt/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list +$ echo "deb https://download.docker.com/linux/ubuntu noble stable" | sudo tee /etc/apt/sources.list.d/docker.list ``` ## Go diff --git a/content/contribute/components/links.md b/content/contribute/components/links.md index 97f47fbd4fc9..d5fed2fd4a2c 100644 --- a/content/contribute/components/links.md +++ b/content/contribute/components/links.md @@ -6,18 +6,17 @@ toc_max: 3 ## Examples -- [External links](https://docker.com) open in a new tab -- [Internal links](links.md) open in the same tab +[External links](https://docker.com) and [internal links](links.md) both +open in the same tab. -You can use relative links, using source filenames, -or you can use absolute links for pages as they appear on the final site. +Use relative links, using source filenames. #### Links to auto-generated content -When you link to heading IDs in auto-generated pages, such as CLI reference content, -you won't get any help from your editor in resolving the anchor names. That's -because the pages are generated at build-time and your editor or LSP doesn't know -about them in advance. +When you link to heading IDs in auto-generated pages, such as CLI +reference content, you won't get any help from your editor in resolving the +anchor names. That's because the pages are generated at build-time and your +editor or LSP doesn't know about them in advance. ## Syntax diff --git a/content/contribute/file-conventions.md b/content/contribute/file-conventions.md index c2166ca2bf88..bb658b802cdf 100644 --- a/content/contribute/file-conventions.md +++ b/content/contribute/file-conventions.md @@ -37,7 +37,7 @@ following keys are supported. The title, description, and keywords are required. Here's an example of a valid (but contrived) page metadata. The order of the metadata elements in the front matter isn't important. -```liquid +```text --- description: Instructions for installing Docker Engine on Ubuntu keywords: requirements, apt, installation, ubuntu, install, uninstall, upgrade, update @@ -70,7 +70,7 @@ Splitting long lines (preferably up to 80 characters) can make it easier to prov If you want to add an entry to the sidebar, but you want the link to point somewhere else, you can use the `sidebar.goto` parameter. This is useful in combination with `build.render` set to `always`, which creates a pageless entry in the sidebar that links to another page. -```md +```text --- title: Dummy sidebar link build: diff --git a/content/contribute/ui.md b/content/contribute/ui.md index d2b72144ed7e..c83efd458ac4 100644 --- a/content/contribute/ui.md +++ b/content/contribute/ui.md @@ -1,45 +1,73 @@ --- title: UI elements in content -description: How to refer and interact with UI content -keywords: ui, contribute, style guide +description: How to refer to and write about UI elements in technical documentation. +keywords: ui, contribute, style guide, docker docs weight: 40 --- -This page contains information on how to write technical content that involves a user interface (UI). +Use this guide when writing documentation that refers to buttons, fields, menus, dialogs, or other user interface (UI) elements. It explains how to format UI terms, write task-focused instructions, and refer to common UI patterns consistently and clearly. 
-## Format names of UI elements +## Format UI element names -Always bold UI elements when referring to them by name. +Use bold formatting for the visible names of UI elements: -This includes names for buttons, menus, dialogs, windows, list items, or any other feature on the page that has a visible name. +- Buttons +- Dialogs +- Windows +- Tabs +- Menu items +- List items +- Form labels +- Section headings -Don't make an official feature name or product name bold, except when it directly refers to an element on the page that uses the name, such as a window title or button name. +For example: -In most cases, follow the capitalization as it appears on the page. However, if labels are inconsistent or they're all uppercase, use sentence case. +*Select **Create**, then fill out the **Name** field.* -## Focus on the task +Do not bold product names or features unless they appear exactly as a label in the UI. -When practical, state instructions in terms of what the user should accomplish, rather than focusing on the widgets and gestures. By avoiding reference to UI elements, you help the user understand the purpose of an instruction, and it can help future-proof procedures. +### Capitalization -|Correct |Incorrect | -|:-----------|:------------| -|Expand the **Advanced options** section | Select the zippy to expand the **Advanced options** section| +- Follow the capitalization as it appears in the UI. +- If UI labels are all uppercase or inconsistent, use sentence case in your docs for readability. +## Write task-focused instructions -## Refer to UI elements +When possible, guide users based on what they’re trying to do, not just what they should select. This makes docs more goal-oriented and adaptable to UI changes. -Don't use UI elements as if they were English verbs or nouns. +| Do this | Avoid this | +|----------------------------------|-------------------------------------------| +| Expand the **Advanced options** section. | Select the zippy to expand the **Advanced options** section. | +| Choose a base image for your container. | Select a dropdown and pick something. | -|Correct |Incorrect | -|:-----------|:------------| -|In the **Name** field, enter an account name. | **Name** the account.| -|To save the settings, select **Save**.| **Save** the settings.| -## Prepositions +## Use correct prepositions with UI elements -When documenting the UI, use the following prepositions. +Choose the right preposition based on the type of UI element you're referencing. -|Preposition |UI element | Example | -|:-----------|:------------|:-----------| -|in | dialogs
<br>fields<br>lists<br>menus<br>panes<br>windows<br> | In the **Alert** dialog, select **OK**.<br>In the **Name** field, enter `wsfc-1`.<br>In the **Item** list, select **Desktop**.<br>In the **File** menu, click **Tools**.<br>In the **Metrics** pane, select **New**.<br>In the **Task** window, select **Start**. |
-| on |pages<br>tabs<br>toolbars | On the **Create an instance** page, select **Add**.<br>On the **Edit** tab, select **Save**.<br>On the **Dashboard toolbar**, select **Edit**.<br> |
+| Preposition | Use with... | Example |
+|-------------|--------------------------------|---------|
+| **in** | dialogs, fields, lists, menus, panes, windows | In the **Name** field, enter your project name. |
+| **on** | pages, tabs, toolbars | On the **Settings** tab, select **General**. |
+
+
+## Use consistent UI element terms
+
+Use these standard terms when referring to elements in Docker products:
+
+| Preferred term | Use when referring to... |
+|---------------------|----------------------------------------------|
+| **button** | A clickable action element (for example, **Start**) |
+| **field** | A place to enter text or select a value |
+| **menu** / **menu item** | A drop-down or navigation option |
+| **drop-down** | A drop-down menu item |
+| **context switcher** | Specific to toggling on cloud mode |
+| **tab** | A selectable view within a window or page |
+| **dialog** | A popup window for confirmations or options |
+| **section** | A logical grouping of content on a page |
+| **list** / **list item** | A scrollable list of selectable entries |
+| **toggle** | A binary control (on/off) |
+| **checkbox** | A multi-select control |
+| **tooltip** | Text that appears on hover |
+
+Finally, instead of saying “click the control,” say “select the **Create** button.” diff --git a/content/get-started/_index.md b/content/get-started/_index.md index e577696978b4..c0b9075646ae 100644 --- a/content/get-started/_index.md +++ b/content/get-started/_index.md @@ -28,6 +28,9 @@ params: description: Get guided through a 45-minute workshop to learn about Docker. link: /get-started/workshop/ icon: desk +aliases: + - /engine/tutorials/usingdocker/ + - /get-started/what-is-a-container/ --- If you're new to Docker, this section guides you through the essential resources to get started. diff --git a/content/get-started/docker-concepts/building-images/multi-stage-builds.md b/content/get-started/docker-concepts/building-images/multi-stage-builds.md index 0de903ee2067..c6336879c21d 100644 --- a/content/get-started/docker-concepts/building-images/multi-stage-builds.md +++ b/content/get-started/docker-concepts/building-images/multi-stage-builds.md @@ -325,9 +325,7 @@ Now that you have the project, you’re ready to create the `Dockerfile`. Your final image is just 428 MB, compared to the original build size of 880 MB. - By optimizing each stage and only including what's necessary, you were able to significantly reduce the - overall image size while still achieving the same functionality. This not only improves performance but - also makes your Docker images more lightweight, more secure, and easier to manage. + By optimizing each stage and only including what's necessary, you were able to significantly reduce the overall image size while still achieving the same functionality. This not only improves performance but also makes your Docker images more lightweight, more secure, and easier to manage. ## Additional resources diff --git a/content/get-started/docker-concepts/running-containers/overriding-container-defaults.md b/content/get-started/docker-concepts/running-containers/overriding-container-defaults.md index 5f06a26b8efd..cd441ca3d2a8 100644 --- a/content/get-started/docker-concepts/running-containers/overriding-container-defaults.md +++ b/content/get-started/docker-concepts/running-containers/overriding-container-defaults.md @@ -71,7 +71,7 @@ In this hands-on guide, you'll see how to use the `docker run` command to overri 1. [Download and install](/get-started/get-docker/) Docker Desktop.
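The diff doesn't show the guide's actual command. As a sketch of the technique this section covers, overriding the default host port mapping is what lets two instances of the same image coexist (assumed container names and password value, not necessarily the guide's exact text):

```console
$ docker run -d --name postgres1 -e POSTGRES_PASSWORD=secret -p 5432:5432 postgres
$ docker run -d --name postgres2 -e POSTGRES_PASSWORD=secret -p 5433:5432 postgres
```

Each container still listens on `5432` internally; the `-p <HOST_PORT>:5432` flag maps them to different host ports so both can run at once.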
-### Run multiple instance of the Postgres database +### Run multiple instances of the Postgres database 1. Start a container using the [Postgres image](https://hub.docker.com/_/postgres) with the following command: diff --git a/content/get-started/docker-concepts/the-basics/what-is-a-container.md b/content/get-started/docker-concepts/the-basics/what-is-a-container.md index 89af955717ed..aeedde7fbd20 100644 --- a/content/get-started/docker-concepts/the-basics/what-is-a-container.md +++ b/content/get-started/docker-concepts/the-basics/what-is-a-container.md @@ -84,7 +84,7 @@ This container runs a web server that displays a simple website. When working wi When you launched the container, you exposed one of the container's ports onto your machine. Think of this as creating configuration to let you to connect through the isolated environment of the container. -For this container, the frontend is accessible on port `8080`. To open the website, select the link in the **Port(s)** column of your container or visit [http://localhost:8080](https://localhost:8080) in your browser. +For this container, the frontend is accessible on port `8080`. To open the website, select the link in the **Port(s)** column of your container or visit [http://localhost:8080](http://localhost:8080) in your browser. ![Screenshot of the landing page coming from the running container](images/access-the-frontend.webp?border) diff --git a/content/get-started/docker-concepts/the-basics/what-is-a-registry.md b/content/get-started/docker-concepts/the-basics/what-is-a-registry.md index f8731fdb5633..b9165d369dbe 100644 --- a/content/get-started/docker-concepts/the-basics/what-is-a-registry.md +++ b/content/get-started/docker-concepts/the-basics/what-is-a-registry.md @@ -19,7 +19,7 @@ Well, you can store your container images on your computer system, but what if y An image registry is a centralized location for storing and sharing your container images. It can be either public or private. [Docker Hub](https://hub.docker.com) is a public registry that anyone can use and is the default registry. -While Docker Hub is a popular option, there are many other available container registries available today, including [Amazon Elastic Container Registry(ECR)](https://aws.amazon.com/ecr/), [Azure Container Registry (ACR)](https://azure.microsoft.com/en-in/products/container-registry), and [Google Container Registry (GCR)](https://cloud.google.com/artifact-registry). You can even run your private registry on your local system or inside your organization. For example, Harbor, JFrog Artifactory, GitLab Container registry etc. +While Docker Hub is a popular option, there are many other container registries available today, including [Amazon Elastic Container Registry (ECR)](https://aws.amazon.com/ecr/), [Azure Container Registry (ACR)](https://azure.microsoft.com/en-in/products/container-registry), and [Google Container Registry (GCR)](https://cloud.google.com/artifact-registry). You can even run your own private registry on your local system or inside your organization, for example with Harbor, JFrog Artifactory, or GitLab Container Registry. ### Registry vs.
repository diff --git a/content/get-started/docker-concepts/the-basics/what-is-docker-compose.md b/content/get-started/docker-concepts/the-basics/what-is-docker-compose.md index 5126fba3646b..90b1db134f2b 100644 --- a/content/get-started/docker-concepts/the-basics/what-is-docker-compose.md +++ b/content/get-started/docker-concepts/the-basics/what-is-docker-compose.md @@ -59,12 +59,13 @@ Follow the instructions to run the to-do list app on your system. When you run this command, you should see an output like this: ```console - [+] Running 4/4 - ✔ app 3 layers [⣿⣿⣿] 0B/0B Pulled 7.1s + [+] Running 5/5 + ✔ app 3 layers [⣿⣿⣿] 0B/0B Pulled 7.1s ✔ e6f4e57cc59e Download complete 0.9s ✔ df998480d81d Download complete 1.0s ✔ 31e174fedd23 Download complete 2.5s - [+] Running 2/4 + ✔ 43c47a581c29 Download complete 2.0s + [+] Running 4/4 ⠸ Network todo-list-app_default Created 0.3s ⠸ Volume "todo-list-app_todo-mysql-data" Created 0.3s ✔ Container todo-list-app-app-1 Started 0.3s @@ -102,7 +103,7 @@ Since this application was started using Docker Compose, it's easy to tear it al You'll see output similar to the following: ```console - [+] Running 2/2 + [+] Running 3/3 ✔ Container todo-list-app-mysql-1 Removed 2.9s ✔ Container todo-list-app-app-1 Removed 0.1s ✔ Network todo-list-app_default Removed 0.1s @@ -116,6 +117,8 @@ Since this application was started using Docker Compose, it's easy to tear it al > > ```console > docker compose down --volumes + > [+] Running 1/0 + > ✔ Volume todo-list-app_todo-mysql-data Removed > ``` 2. Alternatively, you can use the Docker Desktop GUI to remove the containers by selecting the application stack and selecting the **Delete** button. diff --git a/content/get-started/get-docker.md b/content/get-started/get-docker.md index 17217e7654fc..b34af06fa7b1 100644 --- a/content/get-started/get-docker.md +++ b/content/get-started/get-docker.md @@ -31,27 +31,25 @@ section and choose the best installation path for you. > employees OR more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). +
{{< card title="Docker Desktop for Mac" description="A native application using the macOS sandbox security model that delivers all Docker tools to your Mac." link="/desktop/setup/install/mac-install/" - icon="/assets/images/apple_48.svg" >}} - -
+ icon="/icons/AppleMac.svg" >}} {{< card title="Docker Desktop for Windows" description="A native Windows application that delivers all Docker tools to your Windows computer." link="/desktop/setup/install/windows-install/" - icon="/assets/images/windows_48.svg" >}} - -
+ icon="/icons/Windows.svg" >}} {{< card title="Docker Desktop for Linux" description="A native Linux application that delivers all Docker tools to your Linux computer." link="/desktop/setup/install/linux/" - icon="/assets/images/linux_48.svg" >}} + icon="/icons/Linux.svg" >}} +
> [!NOTE] > diff --git a/content/get-started/introduction/get-docker-desktop.md b/content/get-started/introduction/get-docker-desktop.md index 123c0ef1cefe..5b6ff9d5e088 100644 --- a/content/get-started/introduction/get-docker-desktop.md +++ b/content/get-started/introduction/get-docker-desktop.md @@ -26,24 +26,22 @@ This guide will walk you through the installation process, enabling you to exper > Commercial use of Docker Desktop in larger enterprises (more than 250 > employees OR more than $10 million USD in annual revenue) requires a [paid subscription](https://www.docker.com/pricing/?_gl=1*1nyypal*_ga*MTYxMTUxMzkzOS4xNjgzNTM0MTcw*_ga_XJWPQMJYHQ*MTcxNjk4MzU4Mi4xMjE2LjEuMTcxNjk4MzkzNS4xNy4wLjA.). +
{{< card title="Docker Desktop for Mac" description="[Download (Apple Silicon)](https://desktop.docker.com/mac/main/arm64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-arm64) | [Download (Intel)](https://desktop.docker.com/mac/main/amd64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-amd64) | [Install instructions](/desktop/setup/install/mac-install)" - icon="/assets/images/apple_48.svg" >}} - -
+ icon="/icons/AppleMac.svg" >}} {{< card title="Docker Desktop for Windows" description="[Download](https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-windows) | [Install instructions](/desktop/setup/install/windows-install)" - icon="/assets/images/windows_48.svg" >}} - -
+ icon="/icons/Windows.svg" >}} {{< card title="Docker Desktop for Linux" description="[Install instructions](/desktop/setup/install/linux/)" - icon="/assets/images/linux_48.svg" >}} + icon="/icons/Linux.svg" >}} +
Once it's installed, complete the setup process and you're all set to run a Docker container. @@ -94,4 +92,3 @@ Docker Desktop simplifies container management for developers by streamlining th Now that you have Docker Desktop installed and ran your first container, it's time to start developing with containers. {{< button text="Develop with containers" url="develop-with-containers" >}} - diff --git a/content/get-started/resources.md b/content/get-started/resources.md index 985458384d69..133d27ecbef7 100644 --- a/content/get-started/resources.md +++ b/content/get-started/resources.md @@ -9,7 +9,7 @@ Docker and the broader community of Docker experts have put together many differ ## Docker Training -Expand your knowledge on all things Docker with [basic to advanced trainings from Docker experts](https://www.docker.com/resources/trainings/). +Expand your knowledge on all things Docker with [basic to advanced trainings from Docker experts](https://www.docker.com/trainings/). You can find recorded content at your own convenience, or register for a live session to participate in Q&A. diff --git a/content/get-started/workshop/02_our_app.md b/content/get-started/workshop/02_our_app.md index 9d05ba5fc112..563871f9c233 100644 --- a/content/get-started/workshop/02_our_app.md +++ b/content/get-started/workshop/02_our_app.md @@ -13,6 +13,7 @@ aliases: - /get-started/part2/ - /get-started/02_our_app/ - /guides/workshop/02_our_app/ + - /guides/walkthroughs/containerize-your-app/ --- For the rest of this guide, you'll be working with a simple todo diff --git a/content/get-started/workshop/03_updating_app.md b/content/get-started/workshop/03_updating_app.md index 50bce7e01332..63c062cc3710 100644 --- a/content/get-started/workshop/03_updating_app.md +++ b/content/get-started/workshop/03_updating_app.md @@ -100,7 +100,7 @@ To remove a container, you first need to stop it. Once it has stopped, you can r ## Summary -In this section, you learned how to update and rebuild a container, as well as how to stop and remove a container. +In this section, you learned how to update and rebuild an image, as well as how to stop and remove a container. Related information: - [docker CLI reference](/reference/cli/docker/) diff --git a/content/get-started/workshop/04_sharing_app.md b/content/get-started/workshop/04_sharing_app.md index 5ba3b09f065f..5537dff5c9ae 100644 --- a/content/get-started/workshop/04_sharing_app.md +++ b/content/get-started/workshop/04_sharing_app.md @@ -38,8 +38,15 @@ In the following image, you can see an example Docker command from Docker Hub. T ## Push the image -1. In the command line, run the `docker push` command that you see on Docker - Hub. Note that your command will have your Docker ID, not "docker". For example, `docker push YOUR-USER-NAME/getting-started`. +Let's try to push the image to Docker Hub. + +1. In the command line, run the following command: + + ```console + docker push docker/getting-started + ``` + + You'll see an error like this: ```console $ docker push docker/getting-started An image does not exist locally with the tag: docker/getting-started ``` - Why did it fail? The push command was looking for an image named `docker/getting-started`, but - didn't find one. If you run `docker image ls`, you won't see one either. + This failure is expected because the image isn't tagged correctly yet.
Docker is looking for an image named `docker/getting-started`, but your + local image is still named `getting-started`. - To fix this, you need to tag your existing image you've built to give it another name. + You can confirm this by running: -2. Sign in to Docker Hub using the command `docker login -u YOUR-USER-NAME`. + ```console + docker image ls + ``` +2. To fix this, first sign in to Docker Hub using your Docker ID: `docker login -u YOUR-USER-NAME`. 3. Use the `docker tag` command to give the `getting-started` image a new name. Replace `YOUR-USER-NAME` with your Docker ID. ```console diff --git a/content/get-started/workshop/05_persisting_data.md b/content/get-started/workshop/05_persisting_data.md index 60b0e17c2b76..43501ce516c7 100644 --- a/content/get-started/workshop/05_persisting_data.md +++ b/content/get-started/workshop/05_persisting_data.md @@ -114,7 +114,7 @@ You can create the volume and start the container using the CLI or Docker Deskto > ``` > > For more details about Git Bash's syntax differences, see - > [Working with Git Bash](/desktop/troubleshoot-and-support/troubleshoot/topics/#working-with-git-bash). + > [Working with Git Bash](/desktop/troubleshoot-and-support/troubleshoot/topics/#docker-commands-failing-in-git-bash). {{< /tab >}} diff --git a/content/get-started/workshop/07_multi_container.md b/content/get-started/workshop/07_multi_container.md index aab577adeb3a..e08b970bbd72 100644 --- a/content/get-started/workshop/07_multi_container.md +++ b/content/get-started/workshop/07_multi_container.md @@ -192,7 +192,7 @@ The todo app supports the setting of a few environment variables to specify MySQ > > While using env vars to set connection settings is generally accepted for development, it's highly discouraged > when running applications in production. Diogo Monica, a former lead of security at Docker, -> [wrote a fantastic blog post](https://diogomonica.com/2017/03/27/why-you-shouldnt-use-env-variables-for-secret-data/) +> [wrote a fantastic blog post](https://blog.diogomonica.com/2017/03/27/why-you-shouldnt-use-env-variables-for-secret-data/) > explaining why. > > A more secure mechanism is to use the secret support provided by your container orchestration framework. In most cases, diff --git a/content/guides/admin-set-up/comms-and-info-gathering.md b/content/guides/admin-set-up/comms-and-info-gathering.md index 1ad54fb855c0..ce5e03caffc4 100644 --- a/content/guides/admin-set-up/comms-and-info-gathering.md +++ b/content/guides/admin-set-up/comms-and-info-gathering.md @@ -24,9 +24,9 @@ Some companies may have more than one [Docker organization](/manuals/admin/organ ## Step three: Gather requirements -Through [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md), Docker provides numerous configuration parameters that can be preset. +Through [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md), Docker provides numerous configuration parameters that can be preset.
The Docker organization owner, development lead, and infosec representative should review these settings to establish the company’s baseline configuration, including security features and [enforcing sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) for Docker Desktop users. Additionally, they should decide whether to take advantage of other Docker products, such as [Docker Scout](/manuals/scout/_index.md), which is included in the subscription. -To view the parameters that can be preset, see [Configure Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md#step-two-configure-the-settings-you-want-to-lock-in). +To view the parameters that can be preset, see [Configure Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#step-two-configure-the-settings-you-want-to-lock-in). ## Optional step four: Meet with the Docker Implementation team diff --git a/content/guides/admin-set-up/finalize-plans-and-setup.md b/content/guides/admin-set-up/finalize-plans-and-setup.md index b810d3dfbc06..b1b0d235d4ee 100644 --- a/content/guides/admin-set-up/finalize-plans-and-setup.md +++ b/content/guides/admin-set-up/finalize-plans-and-setup.md @@ -6,9 +6,9 @@ weight: 20 ## Step one: Send finalized settings files to the MDM team -After reaching an agreement with the relevant teams about your baseline and security configurations as outlined in module one, configure Settings Management using either the [Docker Admin Console](/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md) or an [`admin-settings.json` file](/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md). +After reaching an agreement with the relevant teams about your baseline and security configurations as outlined in module one, configure Settings Management using either the [Docker Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) or an [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md). -Once the file is ready, collaborate with your MDM team to deploy your chosen settings, along with your chosen method for [enforcing sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +Once the file is ready, collaborate with your MDM team to deploy your chosen settings, along with your chosen method for [enforcing sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). > [!IMPORTANT] > @@ -22,9 +22,9 @@ If you have more than one organization, it’s recommended that you either conso ### Set up single sign-on SSO domain verification -Single sign-on (SSO) lets developers authenticate using their identity providers (IdPs) to access Docker. SSO is available for a whole company, and all associated organizations, or an individual organization that has a Docker Business subscription. For more information, see the [documentation](/manuals/security/for-admins/single-sign-on/_index.md). +Single sign-on (SSO) lets developers authenticate using their identity providers (IdPs) to access Docker. SSO is available for a whole company, and all associated organizations, or an individual organization that has a Docker Business subscription. For more information, see the [documentation](/manuals/enterprise/security/single-sign-on/_index.md). 
-You can also enable [SCIM](/manuals/security/for-admins/provisioning/scim.md) for further automation of provisioning and deprovisioning of users. +You can also enable [SCIM](/manuals/enterprise/security/provisioning/scim.md) for further automation of provisioning and deprovisioning of users. ### Set up Docker product entitlements included in the subscription diff --git a/content/guides/admin-set-up/testing.md b/content/guides/admin-set-up/testing.md index 9ee6306764e9..e334c5a0ab64 100644 --- a/content/guides/admin-set-up/testing.md +++ b/content/guides/admin-set-up/testing.md @@ -10,14 +10,14 @@ You can test SSO and SCIM by signing in to Docker Desktop or Docker Hub with the > [!IMPORTANT] > -> Some users may need CLI based logins to Docker Hub, and for this they will need a [personal access token (PAT)](/manuals/security/for-developers/access-tokens.md). +> Some users might need CLI-based logins to Docker Hub, and for this they will need a [personal access token (PAT)](/manuals/security/access-tokens.md). ## Test RAM and IAM > [!WARNING] > Be sure to communicate with your users before proceeding, as this step will impact all existing users signing into your Docker organization -If you plan to use [Registry Access Management (RAM)](/manuals/security/for-admins/hardened-desktop/registry-access-management.md) and/or [Image Access Management (IAM)](/manuals/security/for-admins/hardened-desktop/image-access-management.md), ensure your test developer signs in to Docker Desktop using their organization credentials. Once authenticated, have them attempt to pull an unauthorized image or one from a disallowed registry via the Docker CLI. They should receive an error message indicating that the registry is restricted by the organization. +If you plan to use [Registry Access Management (RAM)](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) or [Image Access Management (IAM)](/manuals/enterprise/security/hardened-desktop/image-access-management.md), ensure your test developer signs in to Docker Desktop using their organization credentials. Once authenticated, have them attempt to pull an unauthorized image or one from a disallowed registry via the Docker CLI. They should receive an error message indicating that the registry is restricted by the organization. ## Deploy settings and enforce sign in to test group diff --git a/content/guides/admin-user-management/_index.md b/content/guides/admin-user-management/_index.md new file mode 100644 index 000000000000..746eaed94ed6 --- /dev/null +++ b/content/guides/admin-user-management/_index.md @@ -0,0 +1,42 @@ +--- +title: Mastering user and access management +summary: Simplify user access while ensuring security and efficiency in Docker. +description: A guide for managing roles, provisioning users, and optimizing Docker access with tools like SSO and activity logs. +tags: [admin] +params: + featured: true + time: 20 minutes + image: + resource_links: + - title: Overview of Administration in Docker + url: /admin/ + - title: Single sign-on + url: /security/for-admins/single-sign-on/ + - title: Onboard your organization + url: /admin/organization/onboard/ + - title: Roles and permissions + url: /security/for-admins/roles-and-permissions/ + - title: Insights + url: /admin/organization/insights/ + - title: Activity logs + url: /admin/organization/activity-logs/ +--- + +Managing roles and permissions is key to securing your Docker environment while enabling easy collaboration and operational efficiency.
This guide walks IT administrators through the essentials of user and access management, offering strategies for assigning roles, provisioning users, and using tools like Activity logs and Insights to monitor and optimize Docker usage. + +## Who's this for? + +- IT teams: Tasked with configuring and maintaining secure user access. +- Security professionals: Focused on enforcing secure access practices. +- Project managers: Overseeing team collaboration and resource management. + +## What you'll learn + +- How to assess and manage Docker user access and align accounts with organizational needs. +- When to use team configurations for scalable access control. +- How to automate and streamline user provisioning with SSO, SCIM, and JIT. +- How to get the most out of Docker's monitoring tools. + +## Tools integration + +Okta, Entra ID SAML 2.0, Azure Connect (OIDC) diff --git a/content/guides/admin-user-management/audit-and-monitor.md b/content/guides/admin-user-management/audit-and-monitor.md new file mode 100644 index 000000000000..905dd7fbaabd --- /dev/null +++ b/content/guides/admin-user-management/audit-and-monitor.md @@ -0,0 +1,52 @@ +--- +title: Monitoring and insights +description: Track user actions, team workflows, and organizational trends with Activity logs and Insights to enhance security and productivity in Docker. +keywords: organizational insights, user management, access control, security, monitoring, admins +weight: 30 +--- + +Activity logs and Insights are useful tools for user and access management in Docker. They provide visibility into user actions, team workflows, and organizational trends, helping enhance security, ensure compliance, and boost productivity. + +## Activity logs + +Activity logs track events at the organization and repository levels, offering a clear view of activities like repository changes, team updates, and billing adjustments. + +Activity logs are available on Docker Team and Docker Business plans, with data retained for three months. + +### Key features + + - Change tracking: View what changed, who made the change, and when. + + - Comprehensive reporting: Monitor critical events such as repository creation, deletion, privacy changes, and role assignments. + +### Example scenarios + + - Audit trail for security: A repository’s privacy settings were updated unexpectedly. The activity logs reveal which user made the change and when, helping administrators address potential security risks. + + - Team collaboration review: Logs show which team members pushed updates to a critical repository, ensuring accountability during a development sprint. + + - Billing adjustments: Track who added or removed subscription seats to maintain budgetary control and compliance. + +For more information, see [Activity logs](/manuals/admin/organization/activity-logs.md). + +## Insights + +Insights provide data-driven views of Docker usage to improve team productivity and resource allocation. + +### Key benefits + + - Standardized environments: Ensure consistent configurations and enforce best practices across teams. + + - Improved visibility: Monitor metrics like Docker Desktop usage, builds, and container activity to understand team workflows and engagement. + + - Optimized resources: Track license usage and feature adoption to maximize the value of your Docker subscription. + +### Example scenarios + + - Usage trends: Identify underutilized licenses or resources, allowing reallocation to more active teams.
+ + - Build efficiency: Track average build times and success rates to pinpoint bottlenecks in development processes. + + - Container utilization: Analyze container activity across departments to ensure proper resource distribution and cost efficiency. + + For more information, see [Insights](/manuals/admin/organization/insights.md). diff --git a/content/guides/admin-user-management/onboard.md b/content/guides/admin-user-management/onboard.md new file mode 100644 index 000000000000..a76f2eb3796d --- /dev/null +++ b/content/guides/admin-user-management/onboard.md @@ -0,0 +1,80 @@ +--- +title: Onboarding and managing roles and permissions in Docker +description: Learn how to manage roles, invite members, and implement scalable access control in Docker for secure and efficient collaboration. +keywords: sso, scim, jit, invite members, docker hub, docker admin console, onboarding, security +weight: 20 +--- + +This page guides you through onboarding owners and members, and using tools like SSO and SCIM to future-proof onboarding going forward. + +## Step 1: Invite owners + +When you create a Docker organization, you automatically become its sole owner. While optional, adding more owners can significantly ease the process of onboarding and managing your organization by distributing administrative responsibilities. It also ensures continuity so that work isn't blocked if the primary owner is unavailable. + +For detailed information on owners, see [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md). + +## Step 2: Invite members and assign roles + +Members are granted controlled access to resources and enjoy enhanced organizational benefits. When you invite members to join your Docker organization, you immediately assign them a role. + +### Benefits of inviting members + + - Enhanced visibility: Gain insights into user activity, making it easier to monitor access and enforce security policies. + + - Streamlined collaboration: Help members collaborate effectively by granting access to shared resources and repositories. + + - Improved resource management: Organize and track users within your organization, ensuring optimal allocation of resources. + + - Access to enhanced features: Members benefit from organization-wide perks, such as increased pull limits and access to premium Docker features. + + - Security control: Apply and enforce security settings at an organizational level, reducing risks associated with unmanaged accounts. + +For detailed information, see [Manage organization members](/manuals/admin/organization/members.md). + +## Step 3: Future-proof user management + +A robust, future-proof approach to user management combines automated provisioning, centralized authentication, and dynamic access control. Implementing these practices ensures a scalable, secure, and efficient environment. + +### Secure user authentication with single sign-on (SSO) + +Integrating Docker with your identity provider streamlines user access and enhances security. + +SSO: + + - Simplifies sign-in, as users sign in with their organizational credentials. + + - Reduces password-related vulnerabilities. + + - Simplifies onboarding as it works seamlessly with SCIM and group mapping for automated provisioning. + +For more information, see the [SSO documentation](/manuals/enterprise/security/single-sign-on/_index.md).
+ +### Automate onboarding with SCIM and JIT provisioning + +Streamline user provisioning and role management with [SCIM](/manuals/enterprise/security/provisioning/scim.md) and [Just-in-Time (JIT) provisioning](/manuals/enterprise/security/provisioning/just-in-time.md). + +With SCIM you can: + + - Sync users and roles automatically with your identity provider. + + - Automate adding, updating, or removing users based on directory changes. + +With JIT provisioning you can: + + - Automatically add users on first sign-in based on [group mapping](#simplify-access-with-group-mapping). + + - Reduce overhead by eliminating pre-invite steps. + +### Simplify access with group mapping + +Group mapping automates permissions management by linking identity provider groups to Docker roles and teams. + +It also: + + - Reduces manual errors in role assignments. + + - Ensures consistent access control policies. + + - Helps you scale permissions as teams grow or change. + +For more information on how it works, see [Group mapping](/manuals/enterprise/security/provisioning/group-mapping.md). diff --git a/content/guides/admin-user-management/setup.md b/content/guides/admin-user-management/setup.md new file mode 100644 index 000000000000..885ddd9720dd --- /dev/null +++ b/content/guides/admin-user-management/setup.md @@ -0,0 +1,53 @@ +--- +title: Setting up roles and permissions in Docker +description: A guide to securely managing access and collaboration in Docker through roles and teams. +keywords: Docker roles, permissions management, access control, IT administration, team collaboration, least privilege, security, Docker teams, role-based access +weight: 10 +--- + +With the right configurations, you can ensure your developers have easy access to necessary resources while preventing unauthorized access. This page guides you through identifying Docker users, so you can allocate subscription seats efficiently within your Docker organization, and assigning roles to align with your organization's structure. + +## Step 1: Identify your Docker users and accounts + +Before setting up roles and permissions, it’s important to have a clear understanding of who in your organization requires Docker access. Focus on gathering a comprehensive view of active users, their roles within projects, and how they interact with Docker resources. This process can be supported by tools like device management software or manual assessments. Encourage all users to update their Docker accounts to use organizational email addresses, ensuring seamless integration with your subscription. + +For steps on how you can do this, see [step 1 of onboarding your organization](/manuals/admin/organization/onboard.md). + +## Step 2: Assign roles strategically + +When you invite members to join your Docker organization, you assign them a role. + +Docker’s predefined roles offer flexibility for various organizational needs. Assigning roles effectively ensures a balance of accessibility and security. + +- Member: Non-administrative role. Members can view other members that are in the same organization. +- Editor: Partial administrative access to the organization. Editors can create, edit, and delete repositories. They can also edit an existing team's access permissions. +- Organization owner: Full organization administrative access. Organization owners can manage organization repositories, teams, members, settings, and billing.
+- Company owner: In addition to the permissions of an organization owner, company owners can configure settings for their associated organizations. + +For more information, see [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md). + +### Enhancing with teams + +Teams in Docker provide a structured way to manage member access and add an additional level of permissions. They simplify permission management and enable consistent application of policies. + +- Organize users into teams aligned with projects, departments, or functional roles. This approach helps streamline resource allocation and ensures clarity in access control. + +- Assign permissions at the team level rather than individually. For instance, a development team might have "Read & Write" access to certain repositories, while a QA team has "Read-only" access. + +- As teams grow or responsibilities shift, you can easily update permissions or add new members, maintaining consistency without reconfiguring individual settings. + +For more information, see [Create and manage a team](/manuals/admin/organization/manage-a-team.md). + +### Example scenarios + +- Development teams: Assign the member role to developers, granting access to the repositories needed for coding and testing. + +- Team leads: Assign the editor role to team leads for resource management and repository control within their teams. + +- Organizational oversight: Restrict the organization owner or company owner roles to a select few trusted individuals responsible for billing and security settings. + +### Best practices + +- Apply the principle of least privilege. Assign users only the minimum permissions necessary for their roles. + +- Plan to conduct regular reviews of role assignments to ensure they align with evolving team structures and organizational responsibilities. diff --git a/content/guides/agentic-ai.md b/content/guides/agentic-ai.md new file mode 100644 index 000000000000..73d7dbdd7ac3 --- /dev/null +++ b/content/guides/agentic-ai.md @@ -0,0 +1,394 @@ +--- +title: Build and run agentic AI applications with Docker +linktitle: Agentic AI applications +keywords: AI, Docker, Model Runner, MCP Toolkit, Docker Offload, AI agents, application development +summary: | + Learn how to create AI agent applications using Docker Model Runner, MCP Toolkit, and Docker Offload. +params: + tags: [AI] + time: 30 minutes +--- + +## Introduction + +Agentic applications are transforming how software gets built. These apps don't just respond. They decide, plan, and act. They're powered by models, orchestrated by agents, and integrated with APIs, tools, and services in real time. + +All these new agentic applications, no matter what they do, share a common architecture. It's a new kind of stack, built from three core components: + +- Models: These are your GPTs, CodeLlamas, Mistrals. They're doing the reasoning, writing, and planning. They're the engine behind the intelligence. + +- Agent: This is where the logic lives. Agents take a goal, break it down, and figure out how to get it done. They orchestrate everything. They talk to the UI, the tools, the model, and the gateway. + +- MCP gateway: This is what links your agents to the outside world, including APIs, tools, and services. It provides a standard way for agents to call capabilities via the Model Context Protocol (MCP).
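To make that division of labor concrete, here's a deliberately simplified sketch of an agent loop in Python. This isn't the sample app's code (the sample you'll run in this guide uses ADK); `call_model` and `call_tool` are illustrative stand-ins for a model request and an MCP tool call.

```python
def call_model(prompt: str) -> str:
    # Stand-in for a chat-completion request to the model.
    if "Top result" in prompt:  # evidence already gathered
        return "DONE: The claim is false. The Eiffel Tower is in Paris."
    return "SEARCH: Eiffel Tower location"


def call_tool(query: str) -> str:
    # Stand-in for a tool call routed through the MCP gateway.
    return f"Top result for {query!r}: the Eiffel Tower is in Paris, France."


def run_agent(goal: str) -> str:
    # The agent loops: ask the model for the next action, perform it,
    # feed the result back, and stop when the model says it's done.
    context = goal
    for _ in range(5):  # cap the number of turns
        decision = call_model(context)
        if decision.startswith("SEARCH: "):
            context += "\n" + call_tool(decision.removeprefix("SEARCH: "))
        else:
            return decision.removeprefix("DONE: ")
    return "No answer within the turn limit."


print(run_agent("Verify this claim: the Eiffel Tower is in Berlin."))
```

The loop is the agent's whole job: plan with the model, act through tools, and repeat until the goal is met.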
+ +Docker makes this AI-powered stack simpler, faster, and more secure by unifying models, tool gateways, and cloud infrastructure into a developer-friendly workflow that uses Docker Compose. + +![A diagram of the agentic stack](./images/agentic-ai-diagram.webp) + +This guide walks you through the core components of agentic development and shows how Docker ties them all together with the following tools: + +- [Docker Model Runner](../manuals/ai/model-runner/_index.md) lets you run LLMs locally with simple commands and OpenAI-compatible APIs. +- [Docker MCP Catalog and Toolkit](../manuals/ai/mcp-catalog-and-toolkit/_index.md) helps you discover and securely run external tools, like APIs and databases, using the Model Context Protocol (MCP). +- [Docker MCP Gateway](/ai/mcp-gateway/) lets you orchestrate and manage MCP servers. +- [Docker Offload](/offload/) provides a powerful, GPU-accelerated environment to run your AI applications with the same Compose-based workflow you use locally. +- [Docker Compose](/manuals/ai/compose/models-and-compose.md) is the tool that ties it all together, letting you define and run multi-container applications with a single file. + +For this guide, you'll start by running the app in Docker Offload, using the same Compose workflow you're already familiar with. Then, if your machine hardware supports it, you'll run the same app locally using the same workflow. Finally, you'll dig into the Compose file, Dockerfile, and app to see how it all works together. + +## Prerequisites + +To follow this guide, you need to: + + - [Install Docker Desktop 4.43 or later](../get-started/get-docker.md) + - [Enable Docker Model Runner](/manuals/ai/model-runner.md#enable-dmr-in-docker-desktop) + - [Join Docker Offload Beta](/offload/quickstart/) + +## Step 1: Clone the sample application + +You'll use an existing sample application that demonstrates how to connect a model to an external tool using Docker's AI features. + +```console +$ git clone https://github.com/docker/compose-for-agents.git +$ cd compose-for-agents/adk/ +``` + +## Step 2: Run the application with Docker Offload + +You'll start by running the application in Docker Offload, which provides a managed environment for running AI workloads. This is ideal if you want to leverage cloud resources or if your local machine doesn't meet the hardware requirements to run the model locally. Docker Offload includes support for GPU-accelerated instances, making it well suited for compute-intensive workloads like AI model inference. + +To run the application with Docker Offload, follow these steps: + +1. Sign in to the Docker Desktop Dashboard. +2. In a terminal, start Docker Offload by running the following command: + + ```console + $ docker offload start + ``` + + When prompted, choose the account you want to use for Docker Offload and select **Yes** when prompted **Do you need GPU support?**. + +3. In the `adk/` directory of the cloned repository, run the following command in a terminal to build and run the application: + + ```console + $ docker compose up + ``` + + The first time you run this command, Docker pulls the model from Docker Hub, which might take some time. + + The application is now running with Docker Offload. Note that the Compose workflow is the same when using Docker Offload as it is locally. You define your application in a `compose.yaml` file, and then use `docker compose up` to build and run it. + +4. Visit [http://localhost:8080](http://localhost:8080).
Enter a correct or incorrect fact in the prompt and press Enter. An agent searches DuckDuckGo to verify it and another agent revises the output. + + ![Screenshot of the application](./images/agentic-ai-app.png) + +5. Press `ctrl+c` in the terminal to stop the application when you're done. + +6. Run the following command to stop Docker Offload: + + ```console + $ docker offload stop + ``` + +## Step 3 (Optional): Run the application locally + +If your machine meets the necessary hardware requirements, you can run the entire application stack locally using Docker Compose. This lets you test the application end-to-end, including the model and MCP gateway, without needing to run in the cloud. This particular example uses the [Gemma 3 4B model](https://hub.docker.com/r/ai/gemma3) with a context size of `10000`. + +Hardware requirements: + - VRAM: 3.5 GB + - Storage: 2.31 GB + +If your machine exceeds those requirements, consider running the application with a larger context size or a larger model to improve the agents' performance. You can update the model and context size in the `compose.yaml` file. + +To run the application locally, follow these steps: + +1. In the `adk/` directory of the cloned repository, run the following command in a terminal to build and run the application: + + ```console + $ docker compose up + ``` + + The first time you run this command, Docker pulls the model from Docker Hub, which might take some time. + +2. Visit [http://localhost:8080](http://localhost:8080). Enter a correct or incorrect fact in the prompt and press Enter. An agent searches DuckDuckGo to verify it and another agent revises the output. + +3. Press `ctrl+c` in the terminal to stop the application when you're done. + +## Step 4: Review the application environment + +You can find the `compose.yaml` file in the `adk/` directory. Open it in a text editor to see how the services are defined. + +```yaml {collapse=true,title=compose.yaml} services: adk: build: context: . ports: # expose port for web interface - "8080:8080" environment: # point adk at the MCP gateway - MCPGATEWAY_ENDPOINT=http://mcp-gateway:8811/sse depends_on: - mcp-gateway models: gemma3: endpoint_var: MODEL_RUNNER_URL model_var: MODEL_RUNNER_MODEL mcp-gateway: # mcp-gateway secures your MCP servers image: docker/mcp-gateway:latest use_api_socket: true command: - --transport=sse # add any MCP servers you want to use - --servers=duckduckgo models: gemma3: # pre-pull the model when starting Docker Model Runner model: ai/gemma3:4B-Q4_0 context_size: 10000 # 3.5 GB VRAM # increase context size to handle search results # context_size: 131000 # 7.6 GB VRAM ``` + +The app consists of three main components: + + - The `adk` service, which is the web application that runs the agentic AI application. This service talks to the MCP gateway and model. + - The `mcp-gateway` service, which is the MCP gateway that connects the app to external tools and services. + - The `models` block, which defines the model to use with the application. + +When you examine the `compose.yaml` file, you'll notice two notable elements for the model: + + - A service‑level `models` block in the `adk` service + - A top-level `models` block + +These two blocks together let Docker Compose automatically start and connect your ADK web app to the specified LLM. + +> [!TIP] +> +> Looking for more models to use? Check out the [Docker AI Model Catalog](https://hub.docker.com/catalogs/models/).
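Because Model Runner exposes an OpenAI-compatible API, the service wired up by those two `models` blocks can talk to the model with any OpenAI-style client. The sketch below is illustrative rather than taken from the sample app: it assumes the `openai` Python package is installed and that Compose has injected the two variables named in `endpoint_var` and `model_var`.

```python
import os

from openai import OpenAI

# Compose sets these from the service-level `models` block:
# MODEL_RUNNER_URL is the OpenAI-compatible endpoint, and
# MODEL_RUNNER_MODEL is the model name (here, ai/gemma3:4B-Q4_0).
client = OpenAI(
    base_url=os.environ["MODEL_RUNNER_URL"],
    api_key="unused",  # Model Runner doesn't require a real API key
)

response = client.chat.completions.create(
    model=os.environ["MODEL_RUNNER_MODEL"],
    messages=[{"role": "user", "content": "Is the Eiffel Tower in Berlin?"}],
)
print(response.choices[0].message.content)
```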
+ +When examining the `compose.yaml` file, you'll notice the gateway service is a Docker-maintained image, [`docker/mcp-gateway:latest`](https://hub.docker.com/r/docker/agents_gateway). This image is Docker's open source [MCP Gateway](https://github.com/docker/docker-mcp/) that enables your application to connect to MCP servers, which expose tools that models can call. In this example, it uses the [`duckduckgo` MCP server](https://hub.docker.com/mcp/server/duckduckgo/overview) to perform web searches. + +> [!TIP] +> +> Looking for more MCP servers to use? Check out the [Docker MCP Catalog](https://hub.docker.com/catalogs/mcp/). + +With only a few lines of instructions in a Compose file, you're able to run and connect all the necessary services of an agentic AI application. + +In addition to the Compose file, the Dockerfile and the `entrypoint.sh` script it creates play a role in wiring up the AI stack at build and runtime. You can find the `Dockerfile` in the `adk/` directory. Open it in a text editor. + +```dockerfile {collapse=true,title=Dockerfile} +# Use Python 3.13 slim image as base +FROM python:3.13-slim +ENV PYTHONUNBUFFERED=1 + +RUN pip install uv + +WORKDIR /app +# Install system dependencies +COPY pyproject.toml uv.lock ./ +RUN --mount=type=cache,target=/root/.cache/uv \ + UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy \ + uv pip install --system . +# Copy application code +COPY agents/ ./agents/ +RUN python -m compileall -q . + +COPY < +> **Acknowledgment** +> +> Docker extends its sincere gratitude to [Kristiyan Velkov](https://www.linkedin.com/in/kristiyan-velkov-763130b3/) for authoring this guide. As a Docker Captain and experienced Front-end engineer, his expertise in Docker, DevOps, and modern web development has made this resource essential for the community, helping developers navigate and optimize their Docker workflows. + +--- + +## What will you learn? + +In this guide, you will learn how to: + +- Containerize and run an Angular application using Docker. +- Set up a local development environment for Angular inside a container. +- Run tests for your Angular application within a Docker container. +- Configure a CI/CD pipeline using GitHub Actions for your containerized app. +- Deploy the containerized Angular application to a local Kubernetes cluster for testing and debugging. + +You'll start by containerizing an existing Angular application and work your way up to production-level deployments. + +--- + +## Prerequisites + +Before you begin, ensure you have: + +- A basic understanding of [TypeScript](https://www.typescriptlang.org/) and [JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript). +- Familiarity with [Node.js](https://nodejs.org/en) and [npm](https://docs.npmjs.com/about-npm) for managing dependencies and running scripts. +- Familiarity with [Angular](https://angular.io/) fundamentals. +- An understanding of core Docker concepts such as images, containers, and Dockerfiles. If you're new to Docker, start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide. + +Once you've completed the Angular getting started modules, you’ll be fully prepared to containerize your own Angular application using the detailed examples and best practices outlined in this guide.
\ No newline at end of file diff --git a/content/guides/angular/configure-github-actions.md b/content/guides/angular/configure-github-actions.md new file mode 100644 index 000000000000..d7d7576e022e --- /dev/null +++ b/content/guides/angular/configure-github-actions.md @@ -0,0 +1,323 @@ +--- +title: Automate your builds with GitHub Actions +linkTitle: Automate your builds with GitHub Actions +weight: 60 +keywords: CI/CD, GitHub Actions, Angular +description: Learn how to configure CI/CD using GitHub Actions for your Angular application. + +--- + +## Prerequisites + +Complete all the previous sections of this guide, starting with [Containerize an Angular application](containerize.md). + +You must also have: +- A [GitHub](https://github.com/signup) account. +- A [Docker Hub](https://hub.docker.com/signup) account. + +--- + +## Overview + +In this section, you'll set up a CI/CD pipeline using [GitHub Actions](https://docs.github.com/en/actions) to automatically: + +- Build your Angular application inside a Docker container. +- Run tests in a consistent environment. +- Push the production-ready image to [Docker Hub](https://hub.docker.com). + +--- + +## Connect your GitHub repository to Docker Hub + +To enable GitHub Actions to build and push Docker images, you’ll securely store your Docker Hub credentials in your new GitHub repository. + +### Step 1: Generate Docker Hub credentials and set GitHub secrets + +1. Create a Personal Access Token (PAT) from [Docker Hub](https://hub.docker.com). + 1. Go to your **Docker Hub account → Account Settings → Security**. + 2. Generate a new Access Token with **Read/Write** permissions. + 3. Name it something like `docker-angular-sample`. + 4. Copy and save the token — you’ll need it in Step 4. + +2. Create a repository in [Docker Hub](https://hub.docker.com/repositories/). + 1. Go to your **Docker Hub account → Create a repository**. + 2. For the Repository Name, use something descriptive — for example: `angular-sample`. + 3. Once created, copy and save the repository name — you’ll need it in Step 4. + +3. Create a new [GitHub repository](https://github.com/new) for your Angular project. + +4. Add Docker Hub credentials as GitHub repository secrets. + + In your newly created GitHub repository: + + 1. Navigate to: **Settings → Secrets and variables → Actions → New repository secret**. + + 2. Add the following secrets: + + | Name | Value | |-------------------|--------------------------------| | `DOCKER_USERNAME` | Your Docker Hub username | | `DOCKERHUB_TOKEN` | Your Docker Hub access token (created in Step 1) | | `DOCKERHUB_PROJECT_NAME` | Your Docker Hub repository name (created in Step 2) | + + These secrets allow GitHub Actions to authenticate securely with Docker Hub during automated workflows. + +5. Connect your local project to GitHub. + + Link your local project `docker-angular-sample` to the GitHub repository you just created by running the following command from your project root: + + ```console + $ git remote set-url origin https://github.com/{your-username}/{your-repository-name}.git + ``` + + > [!IMPORTANT] + > Replace `{your-username}` and `{your-repository-name}` with your actual GitHub username and repository name.
+ + To confirm that your local project is correctly connected to the remote GitHub repository, run: + + ```console + $ git remote -v + ``` + + You should see output similar to: + + ```console + origin https://github.com/{your-username}/{your-repository-name}.git (fetch) + origin https://github.com/{your-username}/{your-repository-name}.git (push) + ``` + + This confirms that your local repository is properly linked and ready to push your source code to GitHub. + +6. Push your source code to GitHub + + Follow these steps to commit and push your local project to your GitHub repository: + + 1. Stage all files for commit. + + ```console + $ git add -A + ``` + This command stages all changes — including new, modified, and deleted files — preparing them for commit. + + + 2. Commit the staged changes with a descriptive message. + + ```console + $ git commit -m "Initial commit" + ``` + This command creates a commit that snapshots the staged changes with a descriptive message. + + 3. Push the code to the `main` branch. + + ```console + $ git push -u origin main + ``` + This command pushes your local commits to the `main` branch of the remote GitHub repository and sets the upstream branch. + +Once completed, your code will be available on GitHub, and any GitHub Actions workflow you’ve configured will run automatically. + +> [!NOTE] +> Learn more about the Git commands used in this step: +> - [Git add](https://git-scm.com/docs/git-add) – Stage changes (new, modified, deleted) for commit +> - [Git commit](https://git-scm.com/docs/git-commit) – Save a snapshot of your staged changes +> - [Git push](https://git-scm.com/docs/git-push) – Upload local commits to your GitHub repository +> - [Git remote](https://git-scm.com/docs/git-remote) – View and manage remote repository URLs + +--- + +### Step 2: Set up the workflow + +Now you'll create a GitHub Actions workflow that builds your Docker image, runs tests, and pushes the image to Docker Hub. + +1. Go to your repository on GitHub and select the **Actions** tab in the top menu. + +2. Select **Set up a workflow yourself** when prompted. + + This opens an inline editor to create a new workflow file. By default, it will be saved to: + `.github/workflows/main.yml` + + +3. Add the following workflow configuration to the new file: + +```yaml +name: CI/CD – Angular Application with Docker + +on: + push: + branches: [main] + pull_request: + branches: [main] + types: [opened, synchronize, reopened] + +jobs: + build-test-push: + name: Build, Test, and Push Docker Image + runs-on: ubuntu-latest + + steps: + # 1. Checkout source code + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + # 2. Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # 3. Cache Docker layers + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + + # 4. Cache npm dependencies + - name: Cache npm dependencies + uses: actions/cache@v4 + with: + path: ~/.npm + key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-npm- + + # 5. Extract metadata + - name: Extract metadata + id: meta + run: | + echo "REPO_NAME=${GITHUB_REPOSITORY##*/}" >> "$GITHUB_OUTPUT" + echo "SHORT_SHA=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT" + + # 6. Build dev Docker image + - name: Build Docker image for tests + uses: docker/build-push-action@v6 + with: + context: . 
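          # Dockerfile.dev builds the development image that the test step below
          # runs against; the production image is built and pushed separately in
          # step 9 of this workflow.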
+ file: Dockerfile.dev + tags: ${{ steps.meta.outputs.REPO_NAME }}-dev:latest + load: true + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache,mode=max + + # 7. Run Angular tests with Jasmine + - name: Run Angular Jasmine tests inside container + run: | + docker run --rm \ + --workdir /app \ + --entrypoint "" \ + ${{ steps.meta.outputs.REPO_NAME }}-dev:latest \ + sh -c "npm ci && npm run test -- --ci --runInBand" + env: + CI: true + NODE_ENV: test + timeout-minutes: 10 + + # 8. Log in to Docker Hub + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # 9. Build and push production image + - name: Build and push production image + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile + push: true + platforms: linux/amd64,linux/arm64 + tags: | + ${{ secrets.DOCKER_USERNAME }}/${{ secrets.DOCKERHUB_PROJECT_NAME }}:latest + ${{ secrets.DOCKER_USERNAME }}/${{ secrets.DOCKERHUB_PROJECT_NAME }}:${{ steps.meta.outputs.SHORT_SHA }} + cache-from: type=local,src=/tmp/.buildx-cache +``` + +This workflow performs the following tasks for your Angular application: +- Triggers on every `push` or `pull request` targeting the `main` branch. +- Builds a development Docker image using `Dockerfile.dev`, optimized for testing. +- Executes unit tests using Jasmine inside a clean, containerized environment to ensure consistency. +- Halts the workflow immediately if any test fails — enforcing code quality. +- Caches both Docker build layers and npm dependencies for faster CI runs. +- Authenticates securely with Docker Hub using GitHub repository secrets. +- Builds a production-ready image using the production stage of the `Dockerfile`. +- Tags and pushes the final image to Docker Hub with both `latest` and short SHA tags for traceability. + +> [!NOTE] +> For more information about `docker/build-push-action`, refer to the [GitHub Action README](https://github.com/docker/build-push-action/blob/master/README.md). + +--- + +### Step 3: Run the workflow + +After you've added your workflow file, it's time to trigger and observe the CI/CD process in action. + +1. Commit and push your workflow file + + - Select "Commit changes…" in the GitHub editor. + + - This push will automatically trigger the GitHub Actions pipeline. + +2. Monitor the workflow execution + + - Go to the Actions tab in your GitHub repository. + - Select the workflow run to follow each step: **build**, **test**, and (if successful) **push**. + +3. Verify the Docker image on Docker Hub + + - After a successful workflow run, visit your [Docker Hub repositories](https://hub.docker.com/repositories). + - You should see a new image under your repository with: + - Repository name: `${your-repository-name}` + - Tags include: + - `latest` – represents the most recent successful build; ideal for quick testing or deployment. + - `<short-sha>` – a unique identifier based on the commit hash, useful for version tracking, rollbacks, and traceability. + +> [!TIP] Protect your main branch +> To maintain code quality and prevent accidental direct pushes, enable branch protection rules: +> - Navigate to your **GitHub repo → Settings → Branches**. +> - Under Branch protection rules, select **Add rule**. +> - Specify `main` as the branch name. +> - Enable options like: +> - *Require a pull request before merging*. +> - *Require status checks to pass before merging*.
+> +> This ensures that only tested and reviewed code is merged into the `main` branch. +--- + +## Summary + +In this section, you set up a complete CI/CD pipeline for your containerized Angular application using GitHub Actions. + +Here's what you accomplished: + +- Created a new GitHub repository specifically for your project. +- Generated a secure Docker Hub access token and added it to GitHub as a secret. +- Defined a GitHub Actions workflow that: + - Builds your application inside a Docker container. + - Runs tests in a consistent, containerized environment. + - Pushes a production-ready image to Docker Hub if tests pass. +- Triggered and verified the workflow execution through GitHub Actions. +- Confirmed that your image was successfully published to Docker Hub. + +With this setup, your Angular application is now ready for automated testing and deployment across environments — increasing confidence, consistency, and team productivity. + +--- + +## Related resources + +Deepen your understanding of automation and best practices for containerized apps: + +- [Introduction to GitHub Actions](/guides/gha.md) – Learn how GitHub Actions automate your workflows +- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md) – Set up container builds with GitHub Actions +- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) – Full reference for writing GitHub workflows +- [Compose file reference](/compose/compose-file/) – Full configuration reference for `compose.yaml` +- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Optimize your image for performance and security + +--- + +## Next steps + +Next, learn how you can locally test and debug your Angular workloads on Kubernetes before deploying. This helps you ensure your application behaves as expected in a production-like environment, reducing surprises during deployment. diff --git a/content/guides/angular/containerize.md b/content/guides/angular/containerize.md new file mode 100644 index 000000000000..18603ecbaa5a --- /dev/null +++ b/content/guides/angular/containerize.md @@ -0,0 +1,503 @@ +--- +title: Containerize an Angular application +linkTitle: Containerize +weight: 10 +keywords: angular, node, image, initialize, build +description: Learn how to containerize an Angular application with Docker by creating an optimized, production-ready image using best practices for performance, security, and scalability. + +--- + +## Prerequisites + +Before you begin, make sure the following requirements are met: + +- You have installed the latest version of [Docker Desktop](/get-started/get-docker.md). +- You have a [git client](https://git-scm.com/downloads). The examples in this section use a command-line based git client, but you can use any client. + +> **New to Docker?** +> Start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide to get familiar with key concepts like images, containers, and Dockerfiles. + +--- + +## Overview + +This guide walks you through the complete process of containerizing an Angular application with Docker. You’ll learn how to create a production-ready Docker image using best practices that improve performance, security, scalability, and deployment efficiency. + +By the end of this guide, you will: + +- Containerize an Angular application using Docker. +- Create and optimize a Dockerfile for production builds.
+- Use multi-stage builds to minimize image size. +- Serve the application efficiently with a custom NGINX configuration. +- Build secure and maintainable Docker images by following best practices. + +--- + +## Get the sample application + +Clone the sample application to use with this guide. Open a terminal, navigate to the directory where you want to work, and run the following command +to clone the git repository: + +```console +$ git clone https://github.com/kristiyan-velkov/docker-angular-sample +``` +--- + +## Generate a Dockerfile + +Docker provides an interactive CLI tool called `docker init` that helps scaffold the necessary configuration files for containerizing your application. This includes generating a `Dockerfile`, `.dockerignore`, `compose.yaml`, and `README.Docker.md`. + +To begin, navigate to the root of your project directory: + +```console +$ cd docker-angular-sample +``` + +Then run the following command: + +```console +$ docker init +``` +You’ll see output similar to: + +```text +Welcome to the Docker Init CLI! + +This utility will walk you through creating the following files with sensible defaults for your project: + - .dockerignore + - Dockerfile + - compose.yaml + - README.Docker.md + +Let's get started! +``` + +The CLI will prompt you with a few questions about your app setup. +For consistency, please use the same responses shown in the example below when prompted: +| Question | Answer | +|------------------------------------------------------------|-----------------| +| What application platform does your project use? | Node | +| What version of Node do you want to use? | 23.11.0-alpine | +| Which package manager do you want to use? | npm | +| Do you want to run "npm run build" before starting server? | yes | +| What directory is your build output to? | dist | +| What command do you want to use to start the app? | npm run start | +| What port does your server listen on? | 8080 | + +After completion, your project directory will contain the following new files: + +```text +├── docker-angular-sample/ +│ ├── Dockerfile +│ ├── .dockerignore +│ ├── compose.yaml +│ └── README.Docker.md +``` + +--- + +## Build the Docker image + +The default Dockerfile generated by `docker init` serves as a solid starting point for general Node.js applications. However, Angular is a front-end framework that compiles into static assets, so we need to tailor the Dockerfile to optimize for how Angular applications are built and served in a production environment. + +### Step 1: Improve the generated Dockerfile and configuration + +In this step, you’ll improve the Dockerfile and configuration files by following best practices: + +- Use multi-stage builds to keep the final image clean and small +- Serve the app using NGINX, a fast and secure web server +- Improve performance and security by only including what’s needed + +These updates help ensure your app is easy to deploy, fast to load, and production-ready. + +> [!NOTE] +> A `Dockerfile` is a plain text file that contains step-by-step instructions to build a Docker image. It automates packaging your application along with its dependencies and runtime environment. +> For full details, see the [Dockerfile reference](/reference/dockerfile/). 
+ + +### Step 2: Configure the Dockerfile + +Copy and replace the contents of your existing `Dockerfile` with the configuration below: + +```dockerfile +# ========================================= +# Stage 1: Build the Angular Application +# ========================================= +ARG NODE_VERSION=22.14.0-alpine +ARG NGINX_VERSION=alpine3.21 + +# Use a lightweight Node.js image for building (customizable via ARG) +FROM node:${NODE_VERSION} AS builder + +# Set the working directory inside the container +WORKDIR /app + +# Copy package-related files first to leverage Docker's caching mechanism +COPY package.json package-lock.json ./ + +# Install project dependencies using npm ci (ensures a clean, reproducible install) +RUN --mount=type=cache,target=/root/.npm npm ci + +# Copy the rest of the application source code into the container +COPY . . + +# Build the Angular application +RUN npm run build + +# ========================================= +# Stage 2: Prepare Nginx to Serve Static Files +# ========================================= + +FROM nginxinc/nginx-unprivileged:${NGINX_VERSION} AS runner + +# Use a built-in non-root user for security best practices +USER nginx + +# Copy custom Nginx config +COPY nginx.conf /etc/nginx/nginx.conf + +# Copy the static build output from the build stage to Nginx's default HTML serving directory +COPY --chown=nginx:nginx --from=builder /app/dist/*/browser /usr/share/nginx/html + +# Expose port 8080 to allow HTTP traffic +# Note: The default NGINX container now listens on port 8080 instead of 80 +EXPOSE 8080 + +# Start Nginx directly with custom config +ENTRYPOINT ["nginx", "-c", "/etc/nginx/nginx.conf"] +CMD ["-g", "daemon off;"] +``` + +> [!NOTE] +> We are using nginx-unprivileged instead of the standard NGINX image to follow security best practices. +> Running as a non-root user in the final image: +> - Reduces the attack surface +> - Aligns with Docker’s recommendations for container hardening +> - Helps comply with stricter security policies in production environments + +### Step 3: Configure the .dockerignore file + +The `.dockerignore` file tells Docker which files and folders to exclude when building the image. + +> [!NOTE] +> This helps: +> - Reduce image size +> - Speed up the build process +> - Prevent sensitive or unnecessary files (like `.env`, `.git`, or `node_modules`) from being added to the final image. +> +> To learn more, visit the [.dockerignore reference](/reference/dockerfile.md#dockerignore-file).
+ +Copy and replace the contents of your existing `.dockerignore` with the configuration below: + +```dockerignore +# ================================ +# Node and build output +# ================================ +node_modules +dist +out-tsc +.angular +.cache +.tmp + +# ================================ +# Testing & Coverage +# ================================ +coverage +jest +cypress +cypress/screenshots +cypress/videos +reports +playwright-report +.vite +.vitepress + +# ================================ +# Environment & log files +# ================================ +*.env* +!*.env.production +*.log +*.tsbuildinfo + +# ================================ +# IDE & OS-specific files +# ================================ +.vscode +.idea +.DS_Store +Thumbs.db +*.swp + +# ================================ +# Version control & CI files +# ================================ +.git +.gitignore + +# ================================ +# Docker & local orchestration +# ================================ +Dockerfile +Dockerfile.* +.dockerignore +docker-compose.yml +docker-compose*.yml + +# ================================ +# Miscellaneous +# ================================ +*.bak +*.old +*.tmp +``` + +### Step 4: Create the `nginx.conf` file + +To serve your Angular application efficiently inside the container, you’ll configure NGINX with a custom setup. This configuration is optimized for performance, browser caching, gzip compression, and support for client-side routing. + +Create a file named `nginx.conf` in the root of your project directory, and add the following content: + +> [!NOTE] +> To learn more about configuring NGINX, see the [official NGINX documentation](https://nginx.org/en/docs/). + + +```nginx +worker_processes auto; + +pid /tmp/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # Logging + access_log off; + error_log /dev/stderr warn; + + # Performance + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + keepalive_requests 1000; + + # Compression + gzip on; + gzip_vary on; + gzip_proxied any; + gzip_min_length 256; + gzip_comp_level 6; + gzip_types + text/plain + text/css + text/xml + text/javascript + application/javascript + application/x-javascript + application/json + application/xml + application/xml+rss + font/ttf + font/otf + image/svg+xml; + + server { + listen 8080; + server_name localhost; + + root /usr/share/nginx/html; + index index.html; + + # Angular Routing + location / { + try_files $uri $uri/ /index.html; + } + + # Static Assets Caching + location ~* \.(?:ico|css|js|gif|jpe?g|png|woff2?|eot|ttf|svg|map)$ { + expires 1y; + access_log off; + add_header Cache-Control "public, immutable"; + } + + # Optional: Explicit asset route + location /assets/ { + expires 1y; + add_header Cache-Control "public, immutable"; + } + } +} +``` + +### Step 5: Build the Angular application image + +With your custom configuration in place, you're now ready to build the Docker image for your Angular application. + +The updated setup includes: + +- A clean, production-ready NGINX configuration tailored specifically for Angular. +- An efficient multi-stage Docker build, ensuring a small and secure final image.
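Optionally, before building, you can sanity-check the NGINX configuration syntax by mounting it into a disposable container. This is an illustrative extra step, not part of the guide's required flow; any recent NGINX image can run `nginx -t`:

```console
$ docker run --rm -v "$(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro" nginx:alpine nginx -t
```

If the file parses cleanly, NGINX reports that the syntax is ok and the configuration test is successful.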
+ +After completing the previous steps, your project directory should now contain the following files: + +```text +├── docker-angular-sample/ +│ ├── Dockerfile +│ ├── .dockerignore +│ ├── compose.yaml +│ ├── nginx.conf +│ └── README.Docker.md +``` + +Now that your Dockerfile is configured, you can build the Docker image for your Angular application. + +> [!NOTE] +> The `docker build` command packages your application into an image using the instructions in the Dockerfile. It includes all necessary files from the current directory (called the [build context](/build/concepts/context/#what-is-a-build-context)). + +Run the following command from the root of your project: + +```console +$ docker build --tag docker-angular-sample . +``` + +What this command does: +- Uses the Dockerfile in the current directory (`.`) +- Packages the application and its dependencies into a Docker image +- Tags the image as `docker-angular-sample` so you can reference it later + + +### Step 6: View local images + +After building your Docker image, you can check which images are available on your local machine using either the Docker CLI or [Docker Desktop](/manuals/desktop/use-desktop/images.md). Since you're already working in the terminal, let's use the Docker CLI. + +To list all locally available Docker images, run the following command: + +```console +$ docker images +``` + +Example output: + +```shell +REPOSITORY TAG IMAGE ID CREATED SIZE +docker-angular-sample latest 34e66bdb9d40 14 seconds ago 76.4MB +``` + +This output provides key details about your images: + +- **Repository** – The name assigned to the image. +- **Tag** – A version label that helps identify different builds (for example, `latest`). +- **Image ID** – A unique identifier for the image. +- **Created** – The timestamp indicating when the image was built. +- **Size** – The total disk space used by the image. + +If the build was successful, you should see the `docker-angular-sample` image listed. + +--- + +## Run the containerized application + +In the previous step, you created a Dockerfile for your Angular application and built a Docker image using the `docker build` command. Now it’s time to run that image in a container and verify that your application works as expected. + + +Inside the `docker-angular-sample` directory, run the following command in a terminal. + +```console +$ docker compose up --build +``` + +Open a browser and view the application at [http://localhost:8080](http://localhost:8080). You should see a simple Angular web application. + +Press `ctrl+c` in the terminal to stop your application. + +### Run the application in the background + +You can run the application detached from the terminal by adding the `-d` option. Inside the `docker-angular-sample` directory, run the following command in a terminal. + +```console +$ docker compose up --build -d +``` + +Open a browser and view the application at [http://localhost:8080](http://localhost:8080). You should see your Angular application running in the browser. + + +To confirm that the container is running, use the `docker ps` command: + +```console +$ docker ps +``` + +This will list all active containers along with their ports, names, and status. Look for a container exposing port 8080.
+ +Example output: + +```shell +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +eb13026806d1 docker-angular-sample-server "nginx -c /etc/nginx…" About a minute ago Up About a minute 0.0.0.0:8080->8080/tcp docker-angular-sample-server-1 +``` + + +To stop the application, run: + +```console +$ docker compose down +``` + + +> [!NOTE] +> For more information about Compose commands, see the [Compose CLI +> reference](/reference/cli/docker/compose/_index.md). + +--- + +## Summary + +In this guide, you learned how to containerize, build, and run an Angular application using Docker. By following best practices, you created a secure, optimized, and production-ready setup. + +What you accomplished: +- Initialized your project using `docker init` to scaffold essential Docker configuration files. +- Replaced the default `Dockerfile` with a multi-stage build that compiles the Angular application and serves the static files using Nginx. +- Replaced the default `.dockerignore` file to exclude unnecessary files and keep the image clean and efficient. +- Built your Docker image using `docker build`. +- Ran the container using `docker compose up`, both in the foreground and in detached mode. +- Verified that the app was running by visiting [http://localhost:8080](http://localhost:8080). +- Learned how to stop the containerized application using `docker compose down`. + +You now have a fully containerized Angular application, running in a Docker container, and ready for deployment across any environment with confidence and consistency. + +--- + +## Related resources + +Explore official references and best practices to sharpen your Docker workflow: + +- [Multi-stage builds](/build/building/multi-stage/) – Learn how to separate build and runtime stages. +- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Write efficient, maintainable, and secure Dockerfiles. +- [Build context in Docker](/build/concepts/context/) – Learn how context affects image builds. +- [`docker init` CLI reference](/reference/cli/docker/init/) – Scaffold Docker assets automatically. +- [`docker build` CLI reference](/reference/cli/docker/build/) – Build Docker images from a Dockerfile. +- [`docker images` CLI reference](/reference/cli/docker/images/) – Manage and inspect local Docker images. +- [`docker compose up` CLI reference](/reference/cli/docker/compose/up/) – Start and run multi-container applications. +- [`docker compose down` CLI reference](/reference/cli/docker/compose/down/) – Stop and remove containers, networks, and volumes. + +--- + +## Next steps + +With your Angular application now containerized, you're ready to move on to the next step. + +In the next section, you'll learn how to develop your application using Docker containers, enabling a consistent, isolated, and reproducible development environment across any machine. + diff --git a/content/guides/angular/deploy.md b/content/guides/angular/deploy.md new file mode 100644 index 000000000000..a76778166413 --- /dev/null +++ b/content/guides/angular/deploy.md @@ -0,0 +1,201 @@ +--- +title: Test your Angular deployment +linkTitle: Test your deployment +weight: 60 +keywords: deploy, kubernetes, angular +description: Learn how to deploy locally to test and debug your Kubernetes deployment + +--- + +## Prerequisites + +Before you begin, make sure you’ve completed the following: +- Complete all the previous sections of this guide, starting with [Containerize an Angular application](containerize.md).
+- [Enable Kubernetes](/manuals/desktop/features/kubernetes.md#install-and-turn-on-kubernetes) in Docker Desktop.
+
+> [!TIP]
+> New to Kubernetes? Visit the [Kubernetes basics tutorial](https://kubernetes.io/docs/tutorials/kubernetes-basics/) to get familiar with how clusters, pods, deployments, and services work.
+
+---
+
+## Overview
+
+This section guides you through deploying your containerized Angular application locally using [Docker Desktop's built-in Kubernetes](/desktop/kubernetes/). Running your app in a local Kubernetes cluster closely simulates a real production environment, enabling you to test, validate, and debug your workloads with confidence before promoting them to staging or production.
+
+---
+
+## Create a Kubernetes YAML file
+
+Follow these steps to define your deployment configuration:
+
+1. In the root of your project, create a new file named `angular-sample-kubernetes.yaml`.
+
+2. Open the file in your IDE or preferred text editor.
+
+3. Add the following configuration, and be sure to replace `{DOCKER_USERNAME}` and `{DOCKERHUB_PROJECT_NAME}` with your actual Docker Hub username and repository name from the previous section, [Automate your builds with GitHub Actions](configure-github-actions.md).
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: angular-sample
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: angular-sample
+  template:
+    metadata:
+      labels:
+        app: angular-sample
+    spec:
+      containers:
+        - name: angular-container
+          image: {DOCKER_USERNAME}/{DOCKERHUB_PROJECT_NAME}:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8080
+          resources:
+            limits:
+              cpu: "500m"
+              memory: "256Mi"
+            requests:
+              cpu: "250m"
+              memory: "128Mi"
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: angular-sample-service
+  namespace: default
+spec:
+  type: NodePort
+  selector:
+    app: angular-sample
+  ports:
+    - port: 8080
+      targetPort: 8080
+      nodePort: 30001
+```
+
+This manifest defines two key Kubernetes resources, separated by `---`:
+
+- Deployment
+  Deploys a single replica of your Angular application inside a pod. The pod uses the Docker image built and pushed by your GitHub Actions CI/CD workflow
+  (refer to [Automate your builds with GitHub Actions](configure-github-actions.md)).
+  The container listens on port `8080`, where [Nginx](https://nginx.org/en/docs/) serves your production Angular app.
+
+- Service (NodePort)
+  Exposes the deployed pod to your local machine.
+  It forwards traffic from port `30001` on your host to port `8080` inside the container.
+  This lets you access the application in your browser at [http://localhost:30001](http://localhost:30001).
+
+> [!NOTE]
+> To learn more about Kubernetes objects, see the [Kubernetes documentation](https://kubernetes.io/docs/home/).
+
+---
+
+## Deploy and check your application
+
+Follow these steps to deploy your containerized Angular app into a local Kubernetes cluster and verify that it's running correctly.
+
+### Step 1. Apply the Kubernetes configuration
+
+In your terminal, navigate to the directory where your `angular-sample-kubernetes.yaml` file is located, then deploy the resources using:
+
+```console
+$ kubectl apply -f angular-sample-kubernetes.yaml
+```
+
+If everything is configured properly, you'll see confirmation that both the Deployment and the Service were created:
+
+```shell
+deployment.apps/angular-sample created
+service/angular-sample-service created
+```
+
+Both resources are now running inside your local cluster.
+
+### Step 2. Check the Deployment status
+
+Run the following command to check the status of your deployment:
+
+```console
+$ kubectl get deployments
+```
+
+You should see output similar to the following:
+
+```shell
+NAME             READY   UP-TO-DATE   AVAILABLE   AGE
+angular-sample   1/1     1            1           14s
+```
+
+This confirms that your pod is up and running with one replica available.
+
+### Step 3. Verify the Service exposure
+
+Check if the NodePort service is exposing your app to your local machine:
+
+```console
+$ kubectl get services
+```
+
+You should see something like:
+
+```shell
+NAME                     TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
+angular-sample-service   NodePort   10.100.185.105   <none>        8080:30001/TCP   1m
+```
+
+This output confirms that your app is available via NodePort on port 30001.
+
+### Step 4. Access your app in the browser
+
+Open your browser and navigate to [http://localhost:30001](http://localhost:30001).
+
+You should see your production-ready Angular Sample application running, served by your local Kubernetes cluster.
+
+### Step 5. Clean up Kubernetes resources
+
+Once you're done testing, you can delete the deployment and service using:
+
+```console
+$ kubectl delete -f angular-sample-kubernetes.yaml
+```
+
+Expected output:
+
+```shell
+deployment.apps "angular-sample" deleted
+service "angular-sample-service" deleted
+```
+
+This ensures your cluster stays clean and ready for the next deployment.
+
+---
+
+## Summary
+
+In this section, you learned how to deploy your Angular application to a local Kubernetes cluster using Docker Desktop. This setup allows you to test and debug your containerized app in a production-like environment before deploying it to the cloud.
+
+What you accomplished:
+
+- Created a Kubernetes Deployment and NodePort Service for your Angular app.
+- Used `kubectl apply` to deploy the application locally.
+- Verified the app was running and accessible at `http://localhost:30001`.
+- Cleaned up your Kubernetes resources after testing.
+
+---
+
+## Related resources
+
+Explore official references and best practices to sharpen your Kubernetes deployment workflow:
+
+- [Kubernetes documentation](https://kubernetes.io/docs/home/) – Learn about core concepts, workloads, services, and more.
+- [Deploy on Kubernetes with Docker Desktop](/manuals/desktop/features/kubernetes.md) – Use Docker Desktop's built-in Kubernetes support for local testing and development.
+- [`kubectl` CLI reference](https://kubernetes.io/docs/reference/kubectl/) – Manage Kubernetes clusters from the command line.
+- [Kubernetes Deployment resource](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) – Understand how to manage and scale applications using Deployments.
+- [Kubernetes Service resource](https://kubernetes.io/docs/concepts/services-networking/service/) – Learn how to expose your application to internal and external traffic.
\ No newline at end of file
diff --git a/content/guides/angular/develop.md b/content/guides/angular/develop.md
new file mode 100644
index 000000000000..4447859e7492
--- /dev/null
+++ b/content/guides/angular/develop.md
@@ -0,0 +1,179 @@
+---
+title: Use containers for Angular development
+linkTitle: Develop your app
+weight: 30
+keywords: angular, development, node
+description: Learn how to develop your Angular application locally using containers.
+
+---
+
+## Prerequisites
+
+Complete [Containerize Angular application](containerize.md).
+
+---
+
+## Overview
+
+In this section, you'll learn how to set up both production and development environments for your containerized Angular application using Docker Compose. This setup allows you to serve a static production build via Nginx and to develop efficiently inside containers using a live-reloading dev server with Compose Watch.
+
+You'll learn how to:
+- Configure separate containers for production and development
+- Enable automatic file syncing using Compose Watch in development
+- Debug and live-preview your changes in real time without manual rebuilds
+
+---
+
+## Automatically update services (development mode)
+
+Use Compose Watch to automatically sync source file changes into your containerized development environment. This provides a seamless, efficient development experience without restarting or rebuilding containers manually.
+
+### Step 1: Create a development Dockerfile
+
+Create a file named `Dockerfile.dev` in your project root with the following content:
+
+```dockerfile
+# =========================================
+# Stage 1: Development - Angular Application
+# =========================================
+
+# Define the Node.js version to use (Alpine for a small footprint)
+ARG NODE_VERSION=22.14.0-alpine
+
+# Set the base image for development
+FROM node:${NODE_VERSION} AS dev
+
+# Set environment variable to indicate development mode
+ENV NODE_ENV=development
+
+# Set the working directory inside the container
+WORKDIR /app
+
+# Copy only the dependency files first to optimize Docker caching
+COPY package.json package-lock.json ./
+
+# Install dependencies using npm with caching to speed up subsequent builds
+RUN --mount=type=cache,target=/root/.npm npm ci
+
+# Copy all application source files into the container
+COPY . .
+
+# Expose the port Angular uses for the dev server (default is 4200)
+EXPOSE 4200
+
+# Start the Angular dev server and bind it to all network interfaces
+CMD ["npm", "start", "--", "--host=0.0.0.0"]
+```
+
+This file sets up a lightweight development environment for your Angular application using the dev server.
+
+### Step 2: Update your `compose.yaml` file
+
+Open your `compose.yaml` file and define two services: one for production (`angular-prod`) and one for development (`angular-dev`).
+
+Here's an example configuration for an Angular application:
+
+```yaml
+services:
+  angular-prod:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: docker-angular-sample
+    ports:
+      - "8080:8080"
+
+  angular-dev:
+    build:
+      context: .
+      dockerfile: Dockerfile.dev
+    ports:
+      - "4200:4200"
+    develop:
+      watch:
+        - action: sync
+          path: .
+          target: /app
+```
+- The `angular-prod` service builds and serves your static production app using Nginx.
+- The `angular-dev` service runs your Angular development server with live reload and hot module replacement.
+- `watch` triggers file sync with Compose Watch (an optional refinement is shown below).
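+
+If syncing large dependency directories slows things down, Compose Watch can skip them. The following optional fragment extends the `angular-dev` service's `develop` section with an `ignore` list. It's a sketch based on a typical Node.js layout where the container installs its own `node_modules`; it isn't part of the sample project:
+
+```yaml
+    develop:
+      watch:
+        - action: sync
+          path: .
+          target: /app
+          # Skip syncing dependencies that the container manages itself
+          ignore:
+            - node_modules/
+```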
+
+> [!NOTE]
+> For more details, see the official guide: [Use Compose Watch](/manuals/compose/how-tos/file-watch.md).
+
+After completing the previous steps, your project directory should now contain the following files:
+
+```text
+├── docker-angular-sample/
+│   ├── Dockerfile
+│   ├── Dockerfile.dev
+│   ├── .dockerignore
+│   ├── compose.yaml
+│   ├── nginx.conf
+│   └── README.Docker.md
+```
+
+### Step 3: Start Compose Watch
+
+Run the following command from the project root to start the container in watch mode:
+
+```console
+$ docker compose watch angular-dev
+```
+
+### Step 4: Test Compose Watch with Angular
+
+To verify that Compose Watch is working correctly:
+
+1. Open the `src/app/app.component.html` file in your text editor.
+
+2. Locate the following line:
+
+   ```html
+   Docker Angular Sample Application
+   ```
+
+3. Change it to:
+
+   ```html
+   Hello from Docker Compose Watch
+   ```
+
+4. Save the file.
+
+5. Open your browser at [http://localhost:4200](http://localhost:4200).
+
+You should see the updated text appear instantly, without needing to rebuild the container manually. This confirms that file watching and automatic synchronization are working as expected.
+
+---
+
+## Summary
+
+In this section, you set up a complete development and production workflow for your Angular application using Docker and Docker Compose.
+
+Here's what you accomplished:
+- Created a `Dockerfile.dev` to streamline local development with hot reloading
+- Defined separate `angular-dev` and `angular-prod` services in your `compose.yaml` file
+- Enabled real-time file syncing using Compose Watch for a smoother development experience
+- Verified that live updates work seamlessly by modifying and previewing a component
+
+With this setup, you're now equipped to build, run, and iterate on your Angular app entirely within containers, efficiently and consistently across environments.
+
+---
+
+## Related resources
+
+Deepen your knowledge and improve your containerized development workflow with these guides:
+
+- [Using Compose Watch](/manuals/compose/how-tos/file-watch.md) – Automatically sync source changes during development.
+- [Multi-stage builds](/manuals/build/building/multi-stage.md) – Create efficient, production-ready Docker images.
+- [Dockerfile best practices](/build/building/best-practices/) – Write clean, secure, and optimized Dockerfiles.
+- [Compose file reference](/compose/compose-file/) – Learn the full syntax and options available for configuring services in `compose.yaml`.
+- [Docker volumes](/storage/volumes/) – Persist and manage data between container runs.
+
+## Next steps
+
+In the next section, you'll learn how to run unit tests for your Angular application inside Docker containers. This ensures consistent testing across all environments and removes dependencies on local machine setup.
diff --git a/content/guides/angular/run-tests.md b/content/guides/angular/run-tests.md
new file mode 100644
index 000000000000..1e14971bba33
--- /dev/null
+++ b/content/guides/angular/run-tests.md
@@ -0,0 +1,138 @@
+---
+title: Run Angular tests in a container
+linkTitle: Run your tests
+weight: 40
+keywords: angular, test, jasmine
+description: Learn how to run your Angular tests in a container.
+
+---
+
+## Prerequisites
+
+Complete all the previous sections of this guide, starting with [Containerize Angular application](containerize.md).
+
+## Overview
+
+Testing is a critical part of the development process. In this section, you'll learn how to:
+
+- Run Jasmine unit tests using the Angular CLI inside a Docker container.
+- Use Docker Compose to isolate your test environment.
+- Ensure consistency between local and container-based testing.
+
+The `docker-angular-sample` project comes pre-configured with Jasmine, so you can get started quickly without extra setup.
+
+---
+
+## Run tests during development
+
+The `docker-angular-sample` application includes a sample test file at the following location:
+
+```text
+src/app/app.component.spec.ts
+```
+
+This test uses Jasmine to validate the `AppComponent` logic.
+
+### Step 1: Update compose.yaml
+
+Add a new service named `angular-test` to your `compose.yaml` file. This service allows you to run your test suite in an isolated, containerized environment.
+
+```yaml {hl_lines="22-26",linenos=true}
+services:
+  angular-dev:
+    build:
+      context: .
+      dockerfile: Dockerfile.dev
+    ports:
+      - "4200:4200"
+    develop:
+      watch:
+        - action: sync
+          path: .
+          target: /app
+
+  angular-prod:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: docker-angular-sample
+    ports:
+      - "8080:8080"
+
+  angular-test:
+    build:
+      context: .
+      dockerfile: Dockerfile.dev
+    command: ["npm", "run", "test"]
+
+```
+
+The `angular-test` service reuses the same `Dockerfile.dev` used for [development](develop.md) and overrides the default command to run tests with `npm run test`. This setup ensures a consistent test environment that matches your local development configuration.
+
+After completing the previous steps, your project directory should contain the following files:
+
+```text
+├── docker-angular-sample/
+│   ├── Dockerfile
+│   ├── Dockerfile.dev
+│   ├── .dockerignore
+│   ├── compose.yaml
+│   ├── nginx.conf
+│   └── README.Docker.md
+```
+
+### Step 2: Run the tests
+
+To execute your test suite inside the container, run the following command from your project root:
+
+```console
+$ docker compose run --rm angular-test
+```
+
+This command:
+- Starts the `angular-test` service defined in your `compose.yaml` file.
+- Executes the `npm run test` script using the same environment as development.
+- Automatically removes the container after the tests complete, thanks to the [`docker compose run --rm`](/reference/cli/docker/compose/run/) flag.
+
+You should see output similar to the following:
+
+```shell
+Test Suites: 1 passed, 1 total
+Tests:       3 passed, 3 total
+Snapshots:   0 total
+Time:        1.529 s
+```
+
+> [!NOTE]
+> For more information about Compose commands, see the [Compose CLI
+> reference](/reference/cli/docker/compose/_index.md).
+
+---
+
+## Summary
+
+In this section, you learned how to run unit tests for your Angular application inside a Docker container using Jasmine and Docker Compose.
+
+What you accomplished:
+- Created an `angular-test` service in `compose.yaml` to isolate test execution.
+- Reused the development `Dockerfile.dev` to ensure consistency between dev and test environments.
+- Ran tests inside the container using `docker compose run --rm angular-test`.
+- Ensured reliable, repeatable testing across environments without depending on your local machine setup.
+
+---
+
+## Related resources
+
+Explore official references and best practices to sharpen your Docker testing workflow:
+
+- [Dockerfile reference](/reference/dockerfile/) – Understand all Dockerfile instructions and syntax.
+- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Write efficient, maintainable, and secure Dockerfiles.
+- [Compose file reference](/compose/compose-file/) – Learn the full syntax and options available for configuring services in `compose.yaml`.
+- [`docker compose run` CLI reference](/reference/cli/docker/compose/run/) – Run one-off commands in a service container.
+
+---
+
+## Next steps
+
+Next, you'll learn how to set up a CI/CD pipeline using GitHub Actions to automatically build and test your Angular application in a containerized environment. This ensures your code is validated on every push or pull request, maintaining consistency and reliability across your development workflow.
diff --git a/content/guides/azure-pipelines.md b/content/guides/azure-pipelines.md new file mode 100644 index 000000000000..466638858e96 --- /dev/null +++ b/content/guides/azure-pipelines.md @@ -0,0 +1,311 @@ +--- +title: Introduction to Azure Pipelines with Docker +linkTitle: Azure Pipelines and Docker +summary: | + Learn how to automate Docker image build and push using Azure Pipelines. +params: + tags: [devops] + time: 10 minutes +--- + +> This guide is a community contribution. Docker would like to thank [Kristiyan Velkov](https://www.linkedin.com/in/kristiyan-velkov-763130b3/) for his valuable contribution. + +## Prerequisites + +Before you begin, ensure you have the following requirements: + +- A [Docker Hub account](https://hub.docker.com) with a generated access token. +- An active [Azure DevOps project](https://dev.azure.com/) with a connected [Git repository](https://learn.microsoft.com/en-us/azure/devops/repos/git/?view=azure-devops). +- A project that includes a valid [`Dockerfile`](https://docs.docker.com/engine/reference/builder/) at its root or appropriate build context. + +## Overview + +This guide walks you through building and pushing Docker images using [Azure Pipelines](https://azure.microsoft.com/en-us/products/devops/pipelines), enabling a streamlined and secure CI workflow for containerized applications. You’ll learn how to: + +- Configure Docker authentication securely. +- Set up an automated pipeline to build and push images. + +## Set up Azure DevOps to work with Docker Hub + +### Step 1: Configure a Docker Hub service connection + +To securely authenticate with Docker Hub using Azure Pipelines: + +1. Navigate to **Project Settings > Service Connections** in your Azure DevOps project. +2. Select **New service connection > Docker Registry**. +3. Choose **Docker Hub** and provide your Docker Hub credentials or access token. +4. Give the service connection a recognizable name, such as `my-docker-registry`. +5. Grant access only to the specific pipeline(s) that require it for improved security and least privilege. + +> [!IMPORTANT] +> +> Avoid selecting the option to grant access to all pipelines unless absolutely necessary. Always apply the principle of least privilege. 
+
+### Step 2: Create your pipeline
+
+Add the following `azure-pipelines.yml` file to the root of your repository:
+
+```yaml
+# Trigger pipeline on commits to the main branch
+trigger:
+  - main
+
+# Trigger pipeline on pull requests targeting the main branch
+pr:
+  - main
+
+# Define variables for reuse across the pipeline
+variables:
+  imageName: 'docker.io/$(dockerUsername)/my-image'
+  buildTag: '$(Build.BuildId)'
+  latestTag: 'latest'
+
+stages:
+  - stage: BuildAndPush
+    displayName: Build and Push Docker Image
+    jobs:
+      - job: DockerJob
+        displayName: Build and Push
+        pool:
+          vmImage: ubuntu-latest
+          demands:
+            - docker
+        steps:
+          - checkout: self
+            displayName: Checkout Code
+
+          - task: Docker@2
+            displayName: Docker Login
+            inputs:
+              command: login
+              containerRegistry: 'my-docker-registry' # Service connection name
+
+          - task: Docker@2
+            displayName: Build Docker Image
+            inputs:
+              command: build
+              repository: $(imageName)
+              tags: |
+                $(buildTag)
+                $(latestTag)
+              dockerfile: './Dockerfile'
+              arguments: |
+                --sbom=true
+                --attest type=provenance
+                --cache-from $(imageName):latest
+            env:
+              DOCKER_BUILDKIT: 1
+
+          - task: Docker@2
+            displayName: Push Docker Image
+            condition: eq(variables['Build.SourceBranch'], 'refs/heads/main')
+            inputs:
+              command: push
+              repository: $(imageName)
+              tags: |
+                $(buildTag)
+                $(latestTag)
+
+          # Optional: logout for self-hosted agents
+          - script: docker logout
+            displayName: Docker Logout (Self-hosted only)
+            condition: ne(variables['Agent.OS'], 'Windows_NT')
+```
+
+## What this pipeline does
+
+This pipeline automates the Docker image build and deployment process for the main branch. It ensures a secure and efficient workflow with best practices like caching, tagging, and conditional cleanup. Here's what it does:
+
+- Triggers on commits and pull requests targeting the `main` branch.
+- Authenticates securely with Docker Hub using an Azure DevOps service connection.
+- Builds and tags the Docker image using Docker BuildKit for caching.
+- Pushes both the build ID and `latest` tags to Docker Hub.
+- Logs out from Docker if running on a self-hosted Linux agent.
+
+## How the pipeline works
+
+### Step 1: Define pipeline triggers
+
+```yaml
+trigger:
+  - main
+
+pr:
+  - main
+```
+
+This pipeline is triggered automatically on:
+- Commits pushed to the `main` branch
+- Pull requests targeting the `main` branch
+
+> [!TIP]
+> Learn more: [Define pipeline triggers in Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/build/triggers?view=azure-devops)
+
+### Step 2: Define common variables
+
+```yaml
+variables:
+  imageName: 'docker.io/$(dockerUsername)/my-image'
+  buildTag: '$(Build.BuildId)'
+  latestTag: 'latest'
+```
+
+These variables ensure consistent naming, versioning, and reuse throughout the pipeline steps:
+
+- `imageName`: your image path on Docker Hub
+- `buildTag`: a unique tag for each pipeline run
+- `latestTag`: a stable alias for your most recent image
+
+> [!IMPORTANT]
+>
+> The variable `dockerUsername` is not set automatically.
+> Set it securely in your Azure DevOps pipeline variables:
+> 1. Go to **Pipelines > Edit > Variables**
+> 2. Add `dockerUsername` with your Docker Hub username
+>
+> Learn more: [Define and use variables in Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch)
+
+### Step 3: Define pipeline stages and jobs
+
+```yaml
+stages:
+  - stage: BuildAndPush
+    displayName: Build and Push Docker Image
+```
+
+This stage runs on every pipeline execution. The push step inside it is conditional, so images are only pushed when the source branch is `main`.
+
+> [!TIP]
+>
+> Learn more: [Stage conditions in Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/process/stages?view=azure-devops&tabs=yaml)
+
+### Step 4: Job configuration
+
+```yaml
+jobs:
+  - job: DockerJob
+    displayName: Build and Push
+    pool:
+      vmImage: ubuntu-latest
+      demands:
+        - docker
+```
+
+This job uses the latest Ubuntu VM image with Docker support, provided by Microsoft-hosted agents. It can be replaced with a custom pool for self-hosted agents if necessary.
+
+> [!TIP]
+>
+> Learn more: [Specify a pool in your pipeline](https://learn.microsoft.com/en-us/azure/devops/pipelines/agents/pools-queues?view=azure-devops&tabs=yaml%2Cbrowser)
+
+#### Step 4.1: Checkout code
+
+```yaml
+steps:
+  - checkout: self
+    displayName: Checkout Code
+```
+
+This step pulls your repository code into the build agent, so the pipeline can access the Dockerfile and application files.
+
+> [!TIP]
+>
+> Learn more: [checkout step documentation](https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/steps-checkout?view=azure-pipelines)
+
+#### Step 4.2: Authenticate to Docker Hub
+
+```yaml
+- task: Docker@2
+  displayName: Docker Login
+  inputs:
+    command: login
+    containerRegistry: 'my-docker-registry' # Replace with your service connection name
+```
+
+This task uses a pre-configured Azure DevOps Docker registry service connection to authenticate securely without exposing credentials directly.
+
+> [!TIP]
+>
+> Learn more: [Use service connections for Docker Hub](https://learn.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints?view=azure-devops#docker-hub-or-others)
+
+#### Step 4.3: Build the Docker image
+
+```yaml
+- task: Docker@2
+  displayName: Build Docker Image
+  inputs:
+    command: build
+    repository: $(imageName)
+    tags: |
+      $(buildTag)
+      $(latestTag)
+    dockerfile: './Dockerfile'
+    arguments: |
+      --sbom=true
+      --attest type=provenance
+      --cache-from $(imageName):latest
+  env:
+    DOCKER_BUILDKIT: 1
+```
+
+This builds the image with:
+
+- Two tags: one with the unique build ID and one as `latest`
+- Docker BuildKit enabled for faster builds and efficient layer caching
+- Cache pull from the most recently pushed `latest` image
+- Software Bill of Materials (SBOM) for supply chain transparency
+- Provenance attestation to verify how and where the image was built
+
+> [!TIP]
+>
+> Learn more:
+> - [Docker task for Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/tasks/reference/docker-v2?view=azure-pipelines&tabs=yaml)
+> - [Docker SBOM Attestations](/manuals/build/metadata/attestations/slsa-provenance.md)
+
+#### Step 4.4: Push the Docker image
+
+```yaml
+- task: Docker@2
+  displayName: Push Docker Image
+  condition: eq(variables['Build.SourceBranch'], 'refs/heads/main')
+  inputs:
+    command: push
+    repository: $(imageName)
+    tags: |
+      $(buildTag)
+      $(latestTag)
+```
+
+By applying this condition, the pipeline builds the Docker image on every run to ensure early detection of issues, but only pushes the image to the registry when changes are merged into the main branch, keeping your Docker Hub repository clean and focused.
+
+This uploads both tags to Docker Hub:
+- `$(buildTag)` ensures traceability per run.
+- `latest` is used for the most recent image reference.
+
+#### Step 4.5: Log out of Docker (self-hosted agents)
+
+```yaml
+- script: docker logout
+  displayName: Docker Logout (Self-hosted only)
+  condition: ne(variables['Agent.OS'], 'Windows_NT')
+```
+
+Executes `docker logout` at the end of the pipeline on Linux-based self-hosted agents to proactively clean up credentials and enhance security posture.
+
+## Summary
+
+With this Azure Pipelines CI setup, you get:
+
+- Secure Docker authentication using a built-in service connection.
+- Automated image building and tagging triggered by code changes.
+- Efficient builds leveraging the Docker BuildKit cache.
+- Safe cleanup with logout on persistent agents.
+- Images that meet modern software supply chain requirements, with SBOM and provenance attestations.
+
+## Learn more
+
+- [Azure Pipelines Documentation](https://learn.microsoft.com/en-us/azure/devops/pipelines/?view=azure-devops): Comprehensive guide to configuring and managing CI/CD pipelines in Azure DevOps.
+- [Docker Task for Azure Pipelines](https://learn.microsoft.com/en-us/azure/devops/pipelines/tasks/build/docker): Detailed reference for using the Docker task in Azure Pipelines to build and push images.
+- [Docker Buildx Bake](/manuals/build/bake/_index.md): Explore Docker's advanced build tool for complex, multi-stage, and multi-platform build setups. See also the [Mastering Buildx Bake Guide](/guides/bake/index.md) for practical examples and best practices.
+- [Docker Build Cloud](/guides/docker-build-cloud/_index.md): Learn about Docker's managed build service for faster, scalable, and multi-platform image builds in the cloud.
diff --git a/content/guides/bun/configure-ci-cd.md b/content/guides/bun/configure-ci-cd.md index b14e34d39bbe..6b03a70c6abd 100644 --- a/content/guides/bun/configure-ci-cd.md +++ b/content/guides/bun/configure-ci-cd.md @@ -29,9 +29,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token)for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token)for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. @@ -69,12 +69,12 @@ to Docker Hub. ```yaml name: ci - + on: push: branches: - main - + jobs: build: runs-on: ubuntu-latest diff --git a/content/guides/bun/containerize.md b/content/guides/bun/containerize.md index 3e914d5756ab..536f9f080d67 100644 --- a/content/guides/bun/containerize.md +++ b/content/guides/bun/containerize.md @@ -36,7 +36,7 @@ directory to a directory that you want to work in, and run the following command to clone the repository: ```console -$ git clone https://github.com/dockersamples/bun-docker.git +$ git clone https://github.com/dockersamples/bun-docker.git && cd bun-docker ``` You should now have the following contents in your `bun-docker` directory. diff --git a/content/guides/bun/develop.md b/content/guides/bun/develop.md index 0ef2815c37df..7de9bd63ed7e 100644 --- a/content/guides/bun/develop.md +++ b/content/guides/bun/develop.md @@ -23,7 +23,7 @@ In this section, you'll learn how to set up a development environment for your c Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository: ```console -$ git clone https://github.com/dockersamples/bun-docker.git +$ git clone https://github.com/dockersamples/bun-docker.git && cd bun-docker ``` ## Automatically update services diff --git a/content/guides/cpp/_index.md b/content/guides/cpp/_index.md index 1bfc404dca5c..85f317765e3c 100644 --- a/content/guides/cpp/_index.md +++ b/content/guides/cpp/_index.md @@ -12,19 +12,21 @@ aliases: - /guides/language/cpp/ languages: [cpp] params: - time: 10 minutes + time: 20 minutes --- The C++ getting started guide teaches you how to create a containerized C++ application using Docker. In this guide, you'll learn how to: > **Acknowledgment** > -> Docker would like to thank [Pradumna Saraf](https://twitter.com/pradumna_saraf) for his contribution to this guide. +> Docker would like to thank [Pradumna Saraf](https://twitter.com/pradumna_saraf) and [Mohammad-Ali A'râbi](https://twitter.com/MohammadAliEN) for their contribution to this guide. 
-- Containerize and run a C++ application +- Containerize and run a C++ application using a multi-stage Docker build +- Build and run a C++ application using Docker Compose - Set up a local environment to develop a C++ application using containers - Configure a CI/CD pipeline for a containerized C++ application using GitHub Actions - Deploy your containerized application locally to Kubernetes to test and debug your deployment +- Use BuildKit to generate SBOM attestations during the build process After completing the C++ getting started modules, you should be able to containerize your own C++ application based on the examples and instructions provided in this guide. diff --git a/content/guides/cpp/configure-ci-cd.md b/content/guides/cpp/configure-ci-cd.md index 6f0e9de61396..c0d3bed4c87e 100644 --- a/content/guides/cpp/configure-ci-cd.md +++ b/content/guides/cpp/configure-ci-cd.md @@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. diff --git a/content/guides/cpp/containerize.md b/content/guides/cpp/containerize.md index 03042bc97e2b..96c19430b883 100644 --- a/content/guides/cpp/containerize.md +++ b/content/guides/cpp/containerize.md @@ -1,9 +1,9 @@ --- title: Containerize a C++ application -linkTitle: Containerize your app +linkTitle: Build and run a C++ application using Docker Compose weight: 10 keywords: C++, containerize, initialize -description: Learn how to containerize a C++ application. +description: Learn how to use Docker Compose to build and run a C++ application. aliases: - /language/cpp/containerize/ - /guides/language/cpp/containerize/ @@ -15,17 +15,17 @@ aliases: ## Overview -This section walks you through containerizing and running a C++ application. +This section walks you through containerizing and running a C++ application, using Docker Compose. ## Get the sample application -Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository: +We're using the same sample repository that you used in the previous sections of this guide. If you haven't already cloned the repository, clone it now: ```console $ git clone https://github.com/dockersamples/c-plus-plus-docker.git ``` -You should now have the following contents in your `c-plus-plus-docker` +You should now have the following contents in your `c-plus-plus-docker` (root) directory. 
```text diff --git a/content/guides/cpp/develop.md b/content/guides/cpp/develop.md index b3da1f6b8244..016eac56cefa 100644 --- a/content/guides/cpp/develop.md +++ b/content/guides/cpp/develop.md @@ -24,7 +24,7 @@ In this section, you'll learn how to set up a development environment for your c Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository: ```console -$ git clone https://github.com/dockersamples/c-plus-plus-docker.git +$ git clone https://github.com/dockersamples/c-plus-plus-docker.git && cd c-plus-plus-docker ``` ## Automatically update services diff --git a/content/guides/cpp/multistage.md b/content/guides/cpp/multistage.md new file mode 100644 index 000000000000..0dbd3e35f58b --- /dev/null +++ b/content/guides/cpp/multistage.md @@ -0,0 +1,112 @@ +--- +title: Create a multi-stage build for your C++ application +linkTitle: Containerize your app using a multi-stage build +weight: 5 +keywords: C++, containerize, multi-stage +description: Learn how to create a multi-stage build for a C++ application. +aliases: +- /language/cpp/multistage/ +- /guides/language/cpp/multistage/ +--- + +## Prerequisites + +- You have a [Git client](https://git-scm.com/downloads). The examples in this section use a command-line based Git client, but you can use any client. + +## Overview + +This section walks you through creating a multi-stage Docker build for a C++ application. +A multi-stage build is a Docker feature that allows you to use different base images for different stages of the build process, +so you can optimize the size of your final image and separate build dependencies from runtime dependencies. + +The standard practice for compiled languages like C++ is to have a build stage that compiles the code and a runtime stage that runs the compiled binary, +because the build dependencies are not needed at runtime. + +## Get the sample application + +Let's use a simple C++ application that prints `Hello, World!` to the terminal. To do so, clone the sample repository to use with this guide: + +```bash +$ git clone https://github.com/dockersamples/c-plus-plus-docker.git +``` + +The example for this section is under the `hello` directory in the repository. Get inside it and take a look at the files: + +```bash +$ cd c-plus-plus-docker/hello +$ ls +``` + +You should see the following files: + +```text +Dockerfile hello.cpp +``` + +## Check the Dockerfile + +Open the `Dockerfile` in an IDE or text editor. The `Dockerfile` contains the instructions for building the Docker image. + +```Dockerfile +# Stage 1: Build stage +FROM ubuntu:latest AS build + +# Install build-essential for compiling C++ code +RUN apt-get update && apt-get install -y build-essential + +# Set the working directory +WORKDIR /app + +# Copy the source code into the container +COPY hello.cpp . + +# Compile the C++ code statically to ensure it doesn't depend on runtime libraries +RUN g++ -o hello hello.cpp -static + +# Stage 2: Runtime stage +FROM scratch + +# Copy the static binary from the build stage +COPY --from=build /app/hello /hello + +# Command to run the binary +CMD ["/hello"] +``` + +The `Dockerfile` has two stages: + +1. **Build stage**: This stage uses the `ubuntu:latest` image to compile the C++ code and create a static binary. +2. **Runtime stage**: This stage uses the `scratch` image, which is an empty image, to copy the static binary from the build stage and run it. 
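+
+The guide doesn't show the contents of `hello.cpp` itself. A minimal program consistent with this example might look like the following sketch; the actual file in the sample repository may differ:
+
+```cpp
+// hello.cpp - a minimal program matching the expected output of this example
+// (illustrative sketch; the sample repository's file may differ)
+#include <iostream>
+
+int main() {
+    std::cout << "Hello, World!" << std::endl;
+    return 0;
+}
+```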
+
+## Build the Docker image
+
+To build the Docker image, run the following command in the `hello` directory:
+
+```bash
+$ docker build -t hello .
+```
+
+The `-t` flag tags the image with the name `hello`.
+
+## Run the Docker container
+
+To run the Docker container, use the following command:
+
+```bash
+$ docker run hello
+```
+
+You should see the output `Hello, World!` in the terminal.
+
+## Summary
+
+In this section, you learned how to create a multi-stage build for a C++ application. Multi-stage builds help you optimize the size of your final image and separate build dependencies from runtime dependencies.
+In this example, the final image only contains the static binary and doesn't include any build dependencies.
+
+As the image has an empty base, the usual OS tools are also absent. So, for example, you can't run a simple `ls` command in the container:
+
+```bash
+$ docker run hello ls
+```
+
+This makes the image very lightweight and secure.
diff --git a/content/guides/cpp/security.md b/content/guides/cpp/security.md
new file mode 100644
index 000000000000..733c23c9a82b
--- /dev/null
+++ b/content/guides/cpp/security.md
@@ -0,0 +1,96 @@
+---
+title: Supply-chain security for C++ Docker images
+linkTitle: Supply-chain security
+weight: 60
+keywords: C++, security, multi-stage
+description: Learn how to extract SBOMs from C++ Docker images.
+aliases:
+- /language/cpp/security/
+- /guides/language/cpp/security/
+---
+
+## Prerequisites
+
+- You have a [Git client](https://git-scm.com/downloads). The examples in this section use a command-line based Git client, but you can use any client.
+- You have Docker Desktop installed, with containerd enabled for pulling and storing images (it's a checkbox in **Settings** > **General**). Otherwise, if you use Docker Engine:
+  - You have the [Docker SBOM CLI plugin](https://github.com/docker/sbom-cli-plugin) installed. To install it on Docker Engine, use the following command:
+
+    ```bash
+    $ curl -sSfL https://raw.githubusercontent.com/docker/sbom-cli-plugin/main/install.sh | sh -s --
+    ```
+
+  - You have the [Docker Scout CLI plugin](https://docs.docker.com/scout/install/) installed. To install it on Docker Engine, use the following command:
+
+    ```bash
+    $ curl -sSfL https://raw.githubusercontent.com/docker/scout-cli/main/install.sh | sh -s --
+    ```
+
+  - You have [containerd enabled](https://docs.docker.com/engine/storage/containerd/) for Docker Engine.
+
+## Overview
+
+This section walks you through extracting Software Bill of Materials (SBOMs) from a C++ Docker image using the Docker SBOM CLI plugin. SBOMs provide a detailed list of all the components in a software package, including their versions and licenses. You can use SBOMs to track the provenance of your software and ensure that it complies with your organization's security and licensing policies.
+
+## Generate an SBOM
+
+This section uses the Docker image that you built in the [Create a multi-stage build for your C++ application](/guides/language/cpp/multistage/) guide. If you haven't already built the image, follow the steps in that guide.
+The image is named `hello`. To generate an SBOM for the `hello` image, run the following command:
+
+```bash
+$ docker sbom hello
+```
+
+The command reports "No packages discovered". This is because the final image is a scratch image and doesn't have any packages.
+Let's try again with Docker Scout:
+
+```bash
+$ docker scout sbom --format=list hello
+```
+
+This command tells you the same thing.
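+
+If you want to see the plugin actually list packages, you can point it at a non-empty image, such as the `ubuntu:latest` base used in the build stage. This command is illustrative and not part of the guide's flow:
+
+```bash
+$ docker sbom ubuntu:latest
+```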
+ +## Generate an SBOM attestation + +The SBOM can be generated during the build process and "attached" to the image. This is called an SBOM attestation. +To generate an SBOM attestation for the `hello` image, first let's change the Dockerfile: + +```Dockerfile +ARG BUILDKIT_SBOM_SCAN_STAGE=true + +FROM ubuntu:latest AS build + +RUN apt-get update && apt-get install -y build-essential + +WORKDIR /app + +COPY hello.cpp . + +RUN g++ -o hello hello.cpp -static + +# -------------------- +FROM scratch + +COPY --from=build /app/hello /hello + +CMD ["/hello"] +``` + +The first line `ARG BUILDKIT_SBOM_SCAN_STAGE=true` enables SBOM scanning in the build stage. +Now, build the image with the following command: + +```bash +$ docker buildx build --sbom=true -t hello:sbom . +``` + +This command will build the image and generate an SBOM attestation. You can verify that the SBOM is attached to the image by running the following command: + +```bash +$ docker scout sbom --format=list hello:sbom +``` + +Note that the normal `docker sbom` command will not load the SBOM attestation. + +## Summary + +In this section, you learned how to generate SBOM attestation for a C++ Docker image during the build process. +The normal image scanners will not be able to generate SBOMs from scratch images. \ No newline at end of file diff --git a/content/guides/databases.md b/content/guides/databases.md index 8cdea6f6a3d9..2ed465343d30 100644 --- a/content/guides/databases.md +++ b/content/guides/databases.md @@ -78,8 +78,8 @@ To run a container using the GUI: 1. In the Docker Desktop Dashboard, select the global search at the top of the window. 2. Specify `mysql` in the search box, and select the `Images` tab if not already selected. -3. Hover over the `msyql` image and select `Run`. - The **Run a new container** model appears. +3. Hover over the `mysql` image and select `Run`. + The **Run a new container** modal appears. 4. Expand **Optional settings**. 5. In the optional settings, specify the following: @@ -181,7 +181,7 @@ interact with your MySQL database. Before you begin, you must remove any containers you previously ran for this guide. To stop and remove a container, either: -- In a terminal, run `docker remove --force my-mysql` to remove the container +- In a terminal, run `docker rm --force my-mysql` to remove the container named `my-mysql`. - Or, in the Docker Desktop Dashboard, select the **Delete** icon next to your container in the **Containers** view. @@ -221,8 +221,8 @@ To run a container using the GUI: 1. In the Docker Desktop Dashboard, select the global search at the top of the window. 2. Specify `mysql` in the search box, and select the `Images` tab if not already selected. -3. Hover over the `msyql` image and select `Run`. - The **Run a new container** model appears. +3. Hover over the `mysql` image and select `Run`. + The **Run a new container** modal appears. 4. Expand **Optional settings**. 5. In the optional settings, specify the following: @@ -392,7 +392,7 @@ data persists: 2. Specify `mysql` in the search box, and select the **Images** tab if not already selected. 3. Hover over the **mysql** image and select **Run**. - The **Run a new container** model appears. + The **Run a new container** modal appears. 4. Expand **Optional settings**. 5. In the optional settings, specify the following: @@ -433,7 +433,7 @@ data persists: 2. Specify `mysql` in the search box, and select the **Images** tab if not already selected. 3. Hover over the **mysql** image and select **Run**. 
- The **Run a new container** model appears. + The **Run a new container** modal appears. 4. Expand **Optional settings**. 5. In the optional settings, specify the following: diff --git a/content/guides/deno/_index.md b/content/guides/deno/_index.md new file mode 100644 index 000000000000..a864479ed9a0 --- /dev/null +++ b/content/guides/deno/_index.md @@ -0,0 +1,34 @@ +--- +description: Containerize and develop Deno applications using Docker. +keywords: getting started, deno +title: Deno language-specific guide +summary: | + Learn how to containerize JavaScript applications with the Deno runtime using Docker. +linkTitle: Deno +languages: [js] +params: + time: 10 minutes +--- + +The Deno getting started guide teaches you how to create a containerized Deno application using Docker. In this guide, you'll learn how to: + +> **Acknowledgment** +> +> Docker would like to thank [Pradumna Saraf](https://twitter.com/pradumna_saraf) for his contribution to this guide. + +## What will you learn? + +* Containerize and run a Deno application using Docker +* Set up a local environment to develop a Deno application using containers +* Use Docker Compose to run the application. +* Configure a CI/CD pipeline for a containerized Deno application using GitHub Actions +* Deploy your containerized application locally to Kubernetes to test and debug your deployment + +## Prerequisites + +- Basic understanding of JavaScript is assumed. +- You must have familiarity with Docker concepts like containers, images, and Dockerfiles. If you are new to Docker, you can start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide. + +After completing the Deno getting started modules, you should be able to containerize your own Deno application based on the examples and instructions provided in this guide. + +Start by containerizing an existing Deno application. diff --git a/content/guides/python/configure-ci-cd.md b/content/guides/deno/configure-ci-cd.md similarity index 76% rename from content/guides/python/configure-ci-cd.md rename to content/guides/deno/configure-ci-cd.md index e82d06945dc0..06754b542583 100644 --- a/content/guides/python/configure-ci-cd.md +++ b/content/guides/deno/configure-ci-cd.md @@ -1,17 +1,16 @@ --- -title: Configure CI/CD for your Python application +title: Configure CI/CD for your Deno application linkTitle: Configure CI/CD weight: 40 -keywords: ci/cd, github actions, python, flask -description: Learn how to configure CI/CD using GitHub Actions for your Python application. +keywords: ci/cd, github actions, deno, shiny +description: Learn how to configure CI/CD using GitHub Actions for your Deno application. aliases: - - /language/python/configure-ci-cd/ - - /guides/language/python/configure-ci-cd/ +- /language/deno/configure-ci-cd/ --- ## Prerequisites -Complete all the previous sections of this guide, starting with [Containerize a Python application](containerize.md). You must have a [GitHub](https://github.com/signup) account and a [Docker](https://hub.docker.com/signup) account to complete this section. +Complete all the previous sections of this guide, starting with [Containerize a Deno application](containerize.md). You must have a [GitHub](https://github.com/signup) account and a [Docker](https://hub.docker.com/signup) account to complete this section. ## Overview @@ -30,9 +29,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. 
-3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token)for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. @@ -56,7 +55,7 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your ## Step two: Set up the workflow -Set up your GitHub Actions workflow for building, testing, and pushing the image +Set up your GitHub Actions workflow for building and pushing the image to Docker Hub. 1. Go to your repository on GitHub and then select the **Actions** tab. @@ -66,30 +65,31 @@ to Docker Hub. This takes you to a page for creating a new GitHub actions workflow file in your repository, under `.github/workflows/main.yml` by default. -3. In the editor window, copy and paste the following YAML configuration. +3. In the editor window, copy and paste the following YAML configuration and commit the changes. ```yaml name: ci - + on: push: branches: - main - + jobs: build: runs-on: ubuntu-latest steps: - - name: Login to Docker Hub + - + name: Login to Docker Hub uses: docker/login-action@v3 with: username: ${{ vars.DOCKER_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx + - + name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - - name: Build and push + - + name: Build and push uses: docker/build-push-action@v6 with: platforms: linux/amd64,linux/arm64 @@ -120,13 +120,11 @@ Save the workflow file and run the job. ## Summary -In this section, you learned how to set up a GitHub Actions workflow for your Python application. +In this section, you learned how to set up a GitHub Actions workflow for your Deno application. Related information: - -- [Introduction to GitHub Actions](/guides/gha.md) -- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md) -- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) + - [Introduction to GitHub Actions](/manuals/build/ci/github-actions/_index.md) + - [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) ## Next steps diff --git a/content/guides/deno/containerize.md b/content/guides/deno/containerize.md new file mode 100644 index 000000000000..70175896df43 --- /dev/null +++ b/content/guides/deno/containerize.md @@ -0,0 +1,146 @@ +--- +title: Containerize a Deno application +linkTitle: Containerize your app +weight: 10 +keywords: deno, containerize, initialize +description: Learn how to containerize a Deno application. +aliases: + - /language/deno/containerize/ +--- + +## Prerequisites + +* You have a [Git client](https://git-scm.com/downloads). The examples in this section use a command-line based Git client, but you can use any client. + +## Overview + +For a long time, Node.js has been the go-to runtime for server-side JavaScript applications. 
However, recent years have introduced new alternative runtimes, including [Deno](https://deno.land/). Like Node.js, Deno is a JavaScript and TypeScript runtime, but it takes a fresh approach with modern security features, a built-in standard library, and native support for TypeScript.
+
+Why develop Deno applications with Docker? Having a choice of runtimes is exciting, but managing multiple runtimes and their dependencies consistently across environments can be tricky. This is where Docker proves invaluable. Using containers to create and destroy environments on demand simplifies runtime management and ensures consistency. Additionally, as Deno continues to grow and evolve, Docker helps establish a reliable and reproducible development environment, minimizing setup challenges and streamlining the workflow.
+
+## Get the sample application
+
+Clone the sample application to use with this guide. Open a terminal, change
+directory to a directory that you want to work in, and run the following
+command to clone the repository:
+
+```console
+$ git clone https://github.com/dockersamples/docker-deno.git && cd docker-deno
+```
+
+You should now have the following contents in your `docker-deno` directory.
+
+```text
+├── docker-deno/
+│   ├── compose.yml
+│   ├── Dockerfile
+│   ├── LICENSE
+│   ├── server.ts
+│   └── README.md
+```
+
+## Understand the sample application
+
+The sample application is a small Deno application that uses the Oak framework to create a simple API that returns a JSON response. The application listens on port 8000 and returns a message `{"Status" : "OK"}` when you access the application in a browser.
+
+```typescript
+// server.ts
+import { Application, Router } from "https://deno.land/x/oak@v12.0.0/mod.ts";
+
+const app = new Application();
+const router = new Router();
+
+// Define a route that returns JSON
+router.get("/", (context) => {
+  context.response.body = { Status: "OK" };
+  context.response.type = "application/json";
+});
+
+app.use(router.routes());
+app.use(router.allowedMethods());
+
+console.log("Server running on http://localhost:8000");
+await app.listen({ port: 8000 });
+```
+
+## Create a Dockerfile
+
+In the Dockerfile, you'll notice that the `FROM` instruction uses `denoland/deno:latest`
+as the base image. This is the official Deno image, [available on Docker Hub](https://hub.docker.com/r/denoland/deno).
+
+```dockerfile
+# Use the official Deno image
+FROM denoland/deno:latest
+
+# Set the working directory
+WORKDIR /app
+
+# Copy server code into the container
+COPY server.ts .
+
+# Run as a non-root user (recommended for security)
+USER deno
+
+# Expose port 8000
+EXPOSE 8000
+
+# Run the Deno server
+CMD ["run", "--allow-net", "server.ts"]
+```
+
+Aside from specifying `denoland/deno:latest` as the base image, the Dockerfile:
+
+- Sets the working directory in the container to `/app`.
+- Copies `server.ts` into the container.
+- Sets the user to `deno` to run the application as a non-root user.
+- Exposes port 8000 to allow traffic to the application.
+- Runs the Deno server using the `CMD` instruction.
+- Uses the `--allow-net` flag to allow network access to the application. The `server.ts` file uses the Oak framework to create a simple API that listens on port 8000.
+
+## Run the application
+
+Make sure you are in the `docker-deno` directory. Run the following command in a terminal to build and run the application.
+
+```console
+$ docker compose up --build
+```
+
+Open a browser and view the application at [http://localhost:8000](http://localhost:8000). You should see the message `{"Status" : "OK"}` in the browser.
+
+In the terminal, press `ctrl`+`c` to stop the application.
+
+### Run the application in the background
+
+You can run the application detached from the terminal by adding the `-d`
+option. Inside the `docker-deno` directory, run the following command
+in a terminal.
+
+```console
+$ docker compose up --build -d
+```
+
+Open a browser and view the application at [http://localhost:8000](http://localhost:8000).
+
+In the terminal, run the following command to stop the application.
+
+```console
+$ docker compose down
+```
+
+## Summary
+
+In this section, you learned how you can containerize and run your Deno
+application using Docker.
+
+Related information:
+
+ - [Dockerfile reference](/reference/dockerfile.md)
+ - [.dockerignore file](/reference/dockerfile.md#dockerignore-file)
+ - [Docker Compose overview](/manuals/compose/_index.md)
+ - [Compose file reference](/reference/compose-file/_index.md)
+
+## Next steps
+
+In the next section, you'll learn how you can develop your application using
+containers.
diff --git a/content/guides/deno/deploy.md b/content/guides/deno/deploy.md
new file mode 100644
index 000000000000..447d7f4a85f7
--- /dev/null
+++ b/content/guides/deno/deploy.md
@@ -0,0 +1,141 @@
+---
+title: Test your Deno deployment
+linkTitle: Test your deployment
+weight: 50
+keywords: deploy, kubernetes, deno
+description: Learn how to develop locally using Kubernetes
+aliases:
+- /language/deno/deploy/
+---
+
+## Prerequisites
+
+- Complete all the previous sections of this guide, starting with [Containerize a Deno application](containerize.md).
+- [Turn on Kubernetes](/manuals/desktop/features/kubernetes.md#install-and-turn-on-kubernetes) in Docker Desktop.
+
+## Overview
+
+In this section, you'll learn how to use Docker Desktop to deploy your application to a fully-featured Kubernetes environment on your development machine. This allows you to test and debug your workloads on Kubernetes locally before deploying.
+
+## Create a Kubernetes YAML file
+
+In your `docker-deno` directory, create a file named
+`docker-kubernetes.yml`. Open the file in an IDE or text editor and add
+the following contents. Replace `DOCKER_USERNAME/REPO_NAME` with your Docker
+username and the name of the repository that you created in [Configure CI/CD for
+your Deno application](configure-ci-cd.md).
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: docker-deno-demo
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: deno-api
+  template:
+    metadata:
+      labels:
+        app: deno-api
+    spec:
+      containers:
+        - name: deno-api
+          image: DOCKER_USERNAME/REPO_NAME
+          imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: service-entrypoint
+  namespace: default
+spec:
+  type: NodePort
+  selector:
+    app: deno-api
+  ports:
+    - port: 8000
+      targetPort: 8000
+      nodePort: 30001
+```
+
+In this Kubernetes YAML file, there are two objects, separated by the `---`:
+
+ - A Deployment, describing a scalable group of identical pods. In this case,
+   you'll get just one replica, or copy of your pod. That pod, which is
+   described under `template`, has just one container in it. The
+   container is created from the image built by GitHub Actions in [Configure CI/CD for
+   your Deno application](configure-ci-cd.md).
+ - A NodePort service, which routes traffic from port 30001 on your host to
+   port 8000 inside the pods it targets, allowing you to reach your app
+   from the network.
+
+To learn more about Kubernetes objects, see the [Kubernetes documentation](https://kubernetes.io/docs/home/).
+
+## Deploy and check your application
+
+1. In a terminal, navigate to `docker-deno` and deploy your application to
+   Kubernetes.
+
+   ```console
+   $ kubectl apply -f docker-kubernetes.yml
+   ```
+
+   You should see output that looks like the following, indicating your Kubernetes objects were created successfully.
+
+   ```text
+   deployment.apps/docker-deno-demo created
+   service/service-entrypoint created
+   ```
+
+2. Make sure everything worked by listing your deployments.
+
+   ```console
+   $ kubectl get deployments
+   ```
+
+   Your deployment should be listed as follows:
+
+   ```shell
+   NAME               READY   UP-TO-DATE   AVAILABLE   AGE
+   docker-deno-demo   1/1     1            1           10s
+   ```
+
+   This indicates that the one pod you asked for in your YAML is up and running. Do the same check for your services.
+
+   ```console
+   $ kubectl get services
+   ```
+
+   You should get output like the following.
+
+   ```shell
+   NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
+   kubernetes           ClusterIP   10.96.0.1        <none>        443/TCP          88m
+   service-entrypoint   NodePort    10.105.145.223   <none>        8000:30001/TCP   83s
+   ```
+
+   In addition to the default `kubernetes` service, you can see your `service-entrypoint` service, accepting traffic on port 30001/TCP.
+
+3. In a browser, visit the following address. You should see the message `{"Status" : "OK"}`.
+
+   ```console
+   http://localhost:30001/
+   ```
+
+4. Run the following command to tear down your application.
+
+   ```console
+   $ kubectl delete -f docker-kubernetes.yml
+   ```
+
+## Summary
+
+In this section, you learned how to use Docker Desktop to deploy your Deno application to a fully-featured Kubernetes environment on your development machine.
+
+Related information:
+ - [Kubernetes documentation](https://kubernetes.io/docs/home/)
+ - [Deploy on Kubernetes with Docker Desktop](/manuals/desktop/features/kubernetes.md)
+ - [Swarm mode overview](/manuals/engine/swarm/_index.md)
diff --git a/content/guides/deno/develop.md b/content/guides/deno/develop.md
new file mode 100644
index 000000000000..d717823b634d
--- /dev/null
+++ b/content/guides/deno/develop.md
@@ -0,0 +1,75 @@
+---
+title: Use containers for Deno development
+linkTitle: Develop your app
+weight: 20
+keywords: deno, local, development
+description: Learn how to develop your Deno application locally.
+aliases:
+- /language/deno/develop/
+---
+
+## Prerequisites
+
+Complete [Containerize a Deno application](containerize.md).
+
+## Overview
+
+In this section, you'll learn how to set up a development environment for your containerized application. This includes:
+
+- Configuring Compose to automatically update your running Compose services as you edit and save your code
+
+## Get the sample application
+
+Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository:
+
+```console
+$ git clone https://github.com/dockersamples/docker-deno.git && cd docker-deno
+```
+
+## Automatically update services
+
+Use Compose Watch to automatically update your running Compose services as you
+edit and save your code. For more details about Compose Watch, see [Use Compose
+Watch](/manuals/compose/how-tos/file-watch.md).
+
+Open your `compose.yml` file in an IDE or text editor and add the Compose Watch instructions, as shown in the following example.
+
+```yaml {hl_lines="9-12",linenos=true}
+services:
+  server:
+    image: deno-server
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - "8000:8000"
+    develop:
+      watch:
+        - action: rebuild
+          path: .
+```
+
+Run the following command to run your application with Compose Watch.
+
+```console
+$ docker compose watch
+```
+
+Now, when you modify and save `server.ts`, Compose Watch automatically rebuilds the image and updates the running service, so you see your changes without manually rebuilding.
+
+To test it out, open the `server.ts` file in your favorite text editor and change the message from `{"Status" : "OK"}` to `{"Status" : "Updated"}`. Save the file and refresh your browser at `http://localhost:8000`. You should see the updated message.
+
+Press `ctrl+c` in the terminal to stop your application.
+
+## Summary
+
+In this section, you learned how to use Compose Watch to automatically rebuild and run your container when you update your code.
+
+Related information:
+ - [Compose file reference](/reference/compose-file/)
+ - [Compose file watch](/manuals/compose/how-tos/file-watch.md)
+ - [Multi-stage builds](/manuals/build/building/multi-stage.md)
+
+## Next steps
+
+In the next section, you'll take a look at how to set up a CI/CD pipeline using GitHub Actions.
diff --git a/content/guides/dex.md b/content/guides/dex.md
new file mode 100644
index 000000000000..605d6bacc17f
--- /dev/null
+++ b/content/guides/dex.md
@@ -0,0 +1,169 @@
+---
+title: Mocking OAuth services in testing with Dex
+description: &desc Mocking OAuth services in testing with Dex
+keywords: Dex, container-supported development
+linkTitle: Mocking OAuth services with Dex
+summary: *desc
+tags: [app-dev, distributed-systems]
+languages: []
+params:
+  time: 10 minutes
+---
+
+Dex is an open-source OpenID Connect (OIDC) and OAuth 2.0 identity provider that can be configured to authenticate against various backend identity providers, such as LDAP, SAML, and OAuth. Running Dex in a Docker container allows developers to simulate an OAuth 2.0 server for testing and development purposes. This guide walks you through setting up Dex as an OAuth mock server using Docker containers.
+
+OAuth is now the preferred way to authenticate with web services, and most of them let you sign in with popular OAuth providers such as GitHub, Google, or Apple. Using OAuth improves security and simplifies sign-in, since users don't need to create a new profile for every service. By allowing applications to access resources on behalf of users without sharing passwords, OAuth minimizes the risk of credential exposure.
+
+In this guide, you'll learn how to:
+
+- Use Docker to launch a Dex container.
+- Mock OAuth in GitHub Actions (GHA) without relying on an external OAuth provider.
+
+## Using Dex with Docker
+
+The official [Docker image for Dex](https://hub.docker.com/r/dexidp/dex/) provides a convenient way to deploy and manage Dex instances. Dex is available for various CPU architectures, including amd64, armv7, and arm64, ensuring compatibility with different devices and platforms. You can learn more about standalone Dex on the [Dex docs site](https://dexidp.io/docs/getting-started/).
+
+### Prerequisites
+
+[Docker Compose](/compose/): Recommended for managing multi-container Docker applications.
+
+### Setting up Dex with Docker
+
+Begin by creating a directory for your Dex project:
+
+```bash
+mkdir dex-mock-server
+cd dex-mock-server
+```
+
+Organize your project with the following structure:
+
+```text
+dex-mock-server/
+├── config.yaml
+└── compose.yaml
+```
+
+Next, create the Dex configuration file. The `config.yaml` file defines Dex's settings, including connectors, clients, and storage. For a mock server setup, you can use the following minimal configuration:
+
+```yaml
+# config.yaml
+issuer: http://localhost:5556/dex
+storage:
+  type: memory
+web:
+  http: 0.0.0.0:5556
+staticClients:
+  - id: example-app
+    redirectURIs:
+      - 'http://localhost:5555/callback'
+    name: 'Example App'
+    secret: ZXhhbXBsZS1hcHAtc2VjcmV0
+enablePasswordDB: true
+staticPasswords:
+  - email: "admin@example.com"
+    hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W"
+    username: "admin"
+    userID: "1234"
+```
+
+In this configuration:
+
+- `issuer`: The public URL of Dex.
+- `storage`: Uses in-memory storage for simplicity.
+- `web`: Dex listens on port 5556.
+- `staticClients`: Defines a client application (`example-app`) with its redirect URI and secret.
+- `enablePasswordDB`: Enables static password authentication.
+- `staticPasswords`: Defines a static user for authentication. The hash is a bcrypt hash of the password.
+
+> [!NOTE]
+>
+> Ensure the hash is a valid bcrypt hash of your desired password. You can generate one using tools like [bcrypt-generator.com](https://bcrypt-generator.com/), or CLI tools like [htpasswd](https://httpd.apache.org/docs/2.4/programs/htpasswd.html), as in the following example: `echo password | htpasswd -BinC 10 admin | cut -d: -f2`
+
+Next, define the Dex service in your `compose.yaml` file:
+
+```yaml
+# compose.yaml
+
+services:
+  dex:
+    image: dexidp/dex:latest
+    container_name: dex
+    ports:
+      - "5556:5556"
+    volumes:
+      - ./config.yaml:/etc/dex/config.yaml
+    command: ["dex", "serve", "/etc/dex/config.yaml"]
+```
+
+With Docker Compose configured, start Dex:
+
+```bash
+docker compose up -d
+```
+
+This command downloads the Dex image (if it's not already available) and starts the container in detached mode.
+
+To verify that Dex is running, check the logs to ensure Dex started successfully:
+
+```bash
+docker compose logs -f dex
+```
+
+You should see output indicating that Dex is listening on the specified port.
+
+### Using Dex for OAuth testing in GHA
+
+To test the OAuth flow, you'll need a client application configured to authenticate against Dex. One of the most typical use cases is to use it inside GitHub Actions. Since Dex supports mock authentication, you can predefine test users as suggested in the [Dex documentation](https://dexidp.io/docs). The `config.yaml` file should look like the following:
+
+```yaml
+issuer: http://127.0.0.1:5556/dex
+
+storage:
+  type: memory
+
+web:
+  http: 0.0.0.0:5556
+
+oauth2:
+  skipApprovalScreen: true
+
+staticClients:
+  - name: TestClient
+    id: client_test_id
+    secret: client_test_secret
+    redirectURIs:
+      - http://<your-app-address>/path/to/callback/ # example: http://localhost:5555/callback
+
+connectors:
+# mockCallback connector always returns the user 'kilgore@kilgore.trout'.
+- type: mockCallback
+  id: mock
+  name: Mock
+```
+
+Now you can add the Dex service to your repository's `.github/workflows/ci.yaml` file:
+
+```yaml
+[...]
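+# The steps below assume the config.yaml shown above is committed at the
+# repository root; the Dex release version and download URL are illustrative.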
+jobs:
+  test-oauth:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Dex
+        run: |
+          curl -L https://github.com/dexidp/dex/releases/download/v2.37.0/dex_linux_amd64 -o dex
+          chmod +x dex
+
+      - name: Start Dex Server
+        run: |
+          nohup ./dex serve config.yaml > dex.log 2>&1 &
+          sleep 5 # Give Dex time to start
+[...]
+```
+
+### Conclusion
+
+By following this guide, you've set up Dex as an OAuth mock server using Docker. This setup is invaluable for testing and development, allowing you to simulate OAuth flows without relying on external identity providers. For more advanced configurations and integrations, refer to the [Dex documentation](https://dexidp.io/docs/).
diff --git a/content/guides/docker-build-cloud/common-questions.md b/content/guides/docker-build-cloud/common-questions.md
index c397987620bb..cf3f9e80eb59 100644
--- a/content/guides/docker-build-cloud/common-questions.md
+++ b/content/guides/docker-build-cloud/common-questions.md
@@ -42,7 +42,7 @@ account and start a trial of Docker Build Cloud.
 
 Personal accounts are limited to a
 single user.
 
 For teams to receive the shared cache benefit, they must either be on a Docker
-Team or Docker Business plan.
+Team or Docker Business subscription.
 
 ### Does Docker Build Cloud support CI platforms? Does it work with GitHub Actions?
diff --git a/content/guides/docker-compose/_index.md b/content/guides/docker-compose/_index.md
index 68eeb129a3e6..2763f12c4310 100644
--- a/content/guides/docker-compose/_index.md
+++ b/content/guides/docker-compose/_index.md
@@ -9,7 +9,6 @@ tags: [product-demo]
 aliases:
   - /learning-paths/docker-compose/
 params:
-  featured: true
   image: images/learning-paths/compose.png
   time: 10 minutes
 resource_links:
diff --git a/content/guides/dotnet/configure-ci-cd.md b/content/guides/dotnet/configure-ci-cd.md
index 3e667a30f288..aeaf21f6882f 100644
--- a/content/guides/dotnet/configure-ci-cd.md
+++ b/content/guides/dotnet/configure-ci-cd.md
@@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your
 
 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**.
 
-3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value.
+3. Create a new **Repository variable** named `DOCKER_USERNAME`, with your Docker ID as the value.
 
-4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
+4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
 
 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`.
diff --git a/content/guides/dotnet/containerize.md b/content/guides/dotnet/containerize.md
index 0d297b767e02..146874ca9d8c 100644
--- a/content/guides/dotnet/containerize.md
+++ b/content/guides/dotnet/containerize.md
@@ -60,7 +60,7 @@
 
 ? What application platform does your project use? ASP.NET Core
 ? What's the name of your solution's main project? myWebApp
-? What version of .NET do you want to use? 6.0
+? What version of .NET do you want to use? 8.0
 ? What local port do you want to use to access your server?
8080 ``` diff --git a/content/guides/dotnet/deploy.md b/content/guides/dotnet/deploy.md index ed1b464e9548..837917a0baba 100644 --- a/content/guides/dotnet/deploy.md +++ b/content/guides/dotnet/deploy.md @@ -64,7 +64,7 @@ spec: name: server imagePullPolicy: Always ports: - - containerPort: 80 + - containerPort: 8080 hostPort: 8080 protocol: TCP resources: {} @@ -117,7 +117,7 @@ spec: ports: - name: "8080" port: 8080 - targetPort: 80 + targetPort: 8080 nodePort: 30001 selector: service: server diff --git a/content/guides/dotnet/develop.md b/content/guides/dotnet/develop.md index baf32627ef58..d5bea5491fd1 100644 --- a/content/guides/dotnet/develop.md +++ b/content/guides/dotnet/develop.md @@ -92,7 +92,7 @@ services: context: . target: final ports: - - 8080:80 + - 8080:8080 depends_on: db: condition: service_healthy @@ -182,9 +182,9 @@ $ docker container ls You should see output like the following. ```console -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -cb36e310aa7e docker-dotnet-server "dotnet myWebApp.dll" About a minute ago Up About a minute 0.0.0.0:8080->80/tcp docker-dotnet-server-1 -39fdcf0aff7b postgres "docker-entrypoint.s…" About a minute ago Up About a minute (healthy) 5432/tcp docker-dotnet-db-1 +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +cb36e310aa7e docker-dotnet-server "dotnet myWebApp.dll" About a minute ago Up About a minute 0.0.0.0:8080->8080/tcp docker-dotnet-server-1 +39fdcf0aff7b postgres "docker-entrypoint.s…" About a minute ago Up About a minute (healthy) 5432/tcp docker-dotnet-db-1 ``` In the previous example, the container ID is `39fdcf0aff7b`. Run the following command to connect to the postgres database in the container. Replace the container ID with your own container ID. @@ -241,7 +241,7 @@ services: context: . target: final ports: - - 8080:80 + - 8080:8080 depends_on: db: condition: service_healthy @@ -307,19 +307,19 @@ The following is the updated Dockerfile. ```Dockerfile {hl_lines="10-13"} # syntax=docker/dockerfile:1 -FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:6.0-alpine AS build +FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS build ARG TARGETARCH COPY . /source WORKDIR /source/src RUN --mount=type=cache,id=nuget,target=/root/.nuget/packages \ dotnet publish -a ${TARGETARCH/amd64/x64} --use-current-runtime --self-contained false -o /app -FROM mcr.microsoft.com/dotnet/sdk:6.0-alpine AS development +FROM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS development COPY . /source WORKDIR /source/src CMD dotnet run --no-launch-profile -FROM mcr.microsoft.com/dotnet/aspnet:6.0-alpine AS final +FROM mcr.microsoft.com/dotnet/aspnet:8.0-alpine AS final WORKDIR /app COPY --from=build /app . ARG UID=10001 @@ -337,14 +337,14 @@ ENTRYPOINT ["dotnet", "myWebApp.dll"] The following is the updated `compose.yaml` file. -```yaml {hl_lines="5"} +```yaml {hl_lines=[5,15,16]} services: server: build: context: . target: development ports: - - 8080:80 + - 8080:8080 depends_on: db: condition: service_healthy @@ -354,7 +354,6 @@ services: path: . environment: - ASPNETCORE_ENVIRONMENT=Development - - ASPNETCORE_URLS=http://+:80' db: image: postgres restart: always @@ -380,7 +379,7 @@ secrets: file: db/password.txt ``` -Your containerized application will now use the `mcr.microsoft.com/dotnet/sdk:6.0-alpine` image, which includes development tools like `dotnet test`. Continue to the next section to learn how you can run `dotnet test`. 
+Your containerized application will now use the `mcr.microsoft.com/dotnet/sdk:8.0-alpine` image, which includes development tools like `dotnet test`. Continue to the next section to learn how you can run `dotnet test`. ## Summary diff --git a/content/guides/dotnet/run-tests.md b/content/guides/dotnet/run-tests.md index 1a6f2eb2ee70..1e404c345965 100644 --- a/content/guides/dotnet/run-tests.md +++ b/content/guides/dotnet/run-tests.md @@ -36,7 +36,7 @@ You should see output that contains the following. Starting test execution, please wait... A total of 1 test files matched the specified pattern. -Passed! - Failed: 0, Passed: 1, Skipped: 0, Total: 1, Duration: < 1 ms - /source/tests/bin/Debug/net6.0/tests.dll (net6.0) +Passed! - Failed: 0, Passed: 1, Skipped: 0, Total: 1, Duration: < 1 ms - /source/tests/bin/Debug/net8.0/tests.dll (net8.0) ``` To learn more about the command, see [docker compose run](/reference/cli/docker/compose/run/). @@ -50,7 +50,7 @@ The following is the updated Dockerfile. ```dockerfile {hl_lines="9"} # syntax=docker/dockerfile:1 -FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:6.0-alpine AS build +FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS build ARG TARGETARCH COPY . /source WORKDIR /source/src @@ -58,12 +58,12 @@ RUN --mount=type=cache,id=nuget,target=/root/.nuget/packages \ dotnet publish -a ${TARGETARCH/amd64/x64} --use-current-runtime --self-contained false -o /app RUN dotnet test /source/tests -FROM mcr.microsoft.com/dotnet/sdk:6.0-alpine AS development +FROM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS development COPY . /source WORKDIR /source/src CMD dotnet run --no-launch-profile -FROM mcr.microsoft.com/dotnet/aspnet:6.0-alpine AS final +FROM mcr.microsoft.com/dotnet/aspnet:8.0-alpine AS final WORKDIR /app COPY --from=build /app . ARG UID=10001 @@ -92,16 +92,16 @@ You should see output containing the following. #11 1.564 Determining projects to restore... #11 3.421 Restored /source/src/myWebApp.csproj (in 1.02 sec). #11 19.42 Restored /source/tests/tests.csproj (in 17.05 sec). -#11 27.91 myWebApp -> /source/src/bin/Debug/net6.0/myWebApp.dll -#11 28.47 tests -> /source/tests/bin/Debug/net6.0/tests.dll -#11 28.49 Test run for /source/tests/bin/Debug/net6.0/tests.dll (.NETCoreApp,Version=v6.0) +#11 27.91 myWebApp -> /source/src/bin/Debug/net8.0/myWebApp.dll +#11 28.47 tests -> /source/tests/bin/Debug/net8.0/tests.dll +#11 28.49 Test run for /source/tests/bin/Debug/net8.0/tests.dll (.NETCoreApp,Version=v8.0) #11 28.67 Microsoft (R) Test Execution Command Line Tool Version 17.3.3 (x64) #11 28.67 Copyright (c) Microsoft Corporation. All rights reserved. #11 28.68 #11 28.97 Starting test execution, please wait... #11 29.03 A total of 1 test files matched the specified pattern. #11 32.07 -#11 32.08 Passed! - Failed: 0, Passed: 1, Skipped: 0, Total: 1, Duration: < 1 ms - /source/tests/bin/Debug/net6.0/tests.dll (net6.0) +#11 32.08 Passed! 
- Failed: 0, Passed: 1, Skipped: 0, Total: 1, Duration: < 1 ms - /source/tests/bin/Debug/net8.0/tests.dll (net8.0) #11 DONE 32.2s ``` diff --git a/content/guides/frameworks/laravel/_index.md b/content/guides/frameworks/laravel/_index.md index 6e7bd293b8bb..d0d28400a6a2 100644 --- a/content/guides/frameworks/laravel/_index.md +++ b/content/guides/frameworks/laravel/_index.md @@ -31,7 +31,7 @@ The demonstrated examples can be found in [this GitHub repository](https://githu This guide is intended for educational purposes, helping developers adapt and optimize configurations for their specific use cases. Additionally, there are existing tools that support Laravel in containers: -- [Laravel Sail](https://laravel.com/docs/11.x/sail): An official package for easily starting Laravel in Docker. +- [Laravel Sail](https://laravel.com/docs/12.x/sail): An official package for easily starting Laravel in Docker. - [Laradock](https://github.com/laradock/laradock): A community project that helps run Laravel applications in Docker. ## What you’ll learn diff --git a/content/guides/frameworks/laravel/development-setup.md b/content/guides/frameworks/laravel/development-setup.md index ffa427c139b3..54f2d9f6685f 100644 --- a/content/guides/frameworks/laravel/development-setup.md +++ b/content/guides/frameworks/laravel/development-setup.md @@ -119,7 +119,7 @@ A workspace container provides a dedicated shell for asset compilation, Artisan/ ```dockerfile # docker/development/workspace/Dockerfile # Use the official PHP CLI image as the base -FROM php:8.3-cli +FROM php:8.4-cli # Set environment variables for user and group ID ARG UID=1000 diff --git a/content/guides/frameworks/laravel/prerequisites.md b/content/guides/frameworks/laravel/prerequisites.md index 4ea2dec3d5a7..89f109f7f754 100644 --- a/content/guides/frameworks/laravel/prerequisites.md +++ b/content/guides/frameworks/laravel/prerequisites.md @@ -19,7 +19,7 @@ A fundamental understanding of Docker and how containers work will be helpful. I ## Basic knowledge of Laravel -This guide assumes you have a basic understanding of Laravel and PHP. Familiarity with Laravel’s command-line tools, such as [Artisan](https://laravel.com/docs/11.x/artisan), and its project structure is important for following the instructions. +This guide assumes you have a basic understanding of Laravel and PHP. Familiarity with Laravel’s command-line tools, such as [Artisan](https://laravel.com/docs/12.x/artisan), and its project structure is important for following the instructions. - Laravel CLI: You should be comfortable using Laravel’s command-line tool (`artisan`). - Laravel Project Structure: Familiarize yourself with Laravel’s folder structure (`app`, `config`, `routes`, `tests`, etc.). diff --git a/content/guides/frameworks/laravel/production-setup.md b/content/guides/frameworks/laravel/production-setup.md index 61f99009b63e..4e9a975f1704 100644 --- a/content/guides/frameworks/laravel/production-setup.md +++ b/content/guides/frameworks/laravel/production-setup.md @@ -45,7 +45,7 @@ For production, the `php-fpm` Dockerfile creates an optimized image with only th ```dockerfile # Stage 1: Build environment and Composer dependencies -FROM php:8.3-fpm AS builder +FROM php:8.4-fpm AS builder # Install system dependencies and PHP extensions for Laravel with MySQL/PostgreSQL support. # Dependencies in this stage are only required for building the final image. 
@@ -98,7 +98,7 @@ RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local && composer install --no-dev --optimize-autoloader --no-interaction --no-progress --prefer-dist # Stage 2: Production environment -FROM php:8.3-fpm +FROM php:8.4-fpm # Install only runtime libraries needed in production # libfcgi-bin and procps are required for the php-fpm-healthcheck script @@ -173,7 +173,7 @@ If you need a separate CLI container with different extensions or strict separat ```dockerfile # Stage 1: Build environment and Composer dependencies -FROM php:8.3-cli AS builder +FROM php:8.4-cli AS builder # Install system dependencies and PHP extensions required for Laravel + MySQL/PostgreSQL support # Some dependencies are required for PHP extensions only in the build stage @@ -211,7 +211,7 @@ RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local && composer install --no-dev --optimize-autoloader --no-interaction --no-progress --prefer-dist # Stage 2: Production environment -FROM php:8.3-cli +FROM php:8.4-cli # Install client libraries required for php extensions in runtime RUN apt-get update && apt-get install -y --no-install-recommends \ @@ -244,7 +244,7 @@ USER www-data CMD ["bash"] ``` -This Dockerfile is similar to the PHP-FPM Dockerfile, but it uses the `php:8.3-cli` image as the base image and sets up the container for running CLI commands. +This Dockerfile is similar to the PHP-FPM Dockerfile, but it uses the `php:8.4-cli` image as the base image and sets up the container for running CLI commands. ## Create a Dockerfile for Nginx (production) diff --git a/content/guides/genai-leveraging-rag/image-1.png b/content/guides/genai-leveraging-rag/image-1.png new file mode 100644 index 000000000000..31b758c53341 Binary files /dev/null and b/content/guides/genai-leveraging-rag/image-1.png differ diff --git a/content/guides/genai-leveraging-rag/image-2.png b/content/guides/genai-leveraging-rag/image-2.png new file mode 100644 index 000000000000..186797ce4042 Binary files /dev/null and b/content/guides/genai-leveraging-rag/image-2.png differ diff --git a/content/guides/genai-leveraging-rag/image-3.png b/content/guides/genai-leveraging-rag/image-3.png new file mode 100644 index 000000000000..7b700101b98c Binary files /dev/null and b/content/guides/genai-leveraging-rag/image-3.png differ diff --git a/content/guides/genai-leveraging-rag/image.png b/content/guides/genai-leveraging-rag/image.png new file mode 100644 index 000000000000..d44001ffc4b7 Binary files /dev/null and b/content/guides/genai-leveraging-rag/image.png differ diff --git a/content/guides/genai-leveraging-rag/index.md b/content/guides/genai-leveraging-rag/index.md new file mode 100644 index 000000000000..50fa1ee4ae38 --- /dev/null +++ b/content/guides/genai-leveraging-rag/index.md @@ -0,0 +1,242 @@ +--- +title: Leveraging RAG in GenAI to teach new information +linkTitle: Leveraging RAG in GenAI +description: This guide walks through the process of setting up and utilizing a GenAI stack with Retrieval-Augmented Generation (RAG) systems and graph databases. Learn how to integrate graph databases like Neo4j with AI models for more accurate, contextually-aware responses. +keywords: Docker, GenAI, Retrieval-Augmented Generation, RAG, Graph Databases, Neo4j, AI, LLM +summary: | + This guide explains setting up a GenAI stack with Retrieval-Augmented Generation (RAG) and Neo4j, covering key concepts, deployment steps, and a case study. 
+  It also includes troubleshooting tips for optimizing AI performance with real-time data.
+tags: [ai]
+params:
+  time: 35 minutes
+---
+
+## Introduction
+
+Retrieval-Augmented Generation (RAG) is a powerful framework that enhances large language models (LLMs) by integrating information retrieval from external knowledge sources. This guide focuses on a specialized RAG implementation using graph databases like Neo4j, which excel in managing highly connected, relational data. Unlike traditional RAG setups with vector databases, combining RAG with graph databases offers better context-awareness and relationship-driven insights.
+
+In this guide, you will:
+
+* Explore the advantages of integrating graph databases into a RAG framework.
+* Configure a GenAI stack with Docker, incorporating Neo4j and an AI model.
+* Analyze a real-world case study that highlights the effectiveness of this approach for handling specialized queries.
+
+## Understanding RAG
+
+RAG is a hybrid framework that enhances the capabilities of large language models by integrating information retrieval. It combines three core components:
+
+- **Information retrieval** from an external knowledge base
+- **Large Language Model (LLM)** for generating responses
+- **Vector embeddings** to enable semantic search
+
+In a RAG system, vector embeddings are used to represent the semantic meaning of text in a way that a machine can understand and process. For instance, the words "dog" and "puppy" will have similar embeddings because they share similar meanings. By integrating these embeddings into the RAG framework, the system can combine the generative power of large language models with the ability to pull in highly relevant, contextually-aware data from external sources.
+
+The system operates as follows:
+
+1. Questions get turned into mathematical patterns that capture their meaning.
+2. These patterns help find matching information in a database.
+3. The LLM generates responses that blend the model's inherent knowledge with this extra information.
+
+To store this vector information efficiently, you need a special type of database.
+
+## Introduction to graph databases
+
+Graph databases, such as Neo4j, are specifically designed for managing highly connected data. Unlike traditional relational databases, graph databases prioritize both the entities and the relationships between them, making them ideal for tasks where connections are as important as the data itself.
+
+Graph databases stand out for their unique approach to data storage and querying. They use nodes (or vertices) to represent entities and edges to represent the relationships between these entities. This structure allows for efficient handling of highly connected data and complex queries, which are difficult to manage in traditional database systems.
+
+SQL databases and graph databases differ significantly in their data models. SQL databases use a tabular structure with rows and columns, where relationships between entities are established using foreign keys. This approach works well for structured data and predefined relationships. In contrast, graph databases represent data as nodes (entities) and edges (relationships), making the representation of relationships more intuitive and flexible. This structure is particularly advantageous for complex, interconnected data.
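+
+To make the contrast concrete, the following short Cypher sketch creates two entities and a direct relationship between them, something that would typically require a join table in SQL. The labels and properties here are invented for illustration:
+
+```text
+// Create two nodes and a relationship between them
+CREATE (dev:Person {name: "Alice"})
+CREATE (app:Application {name: "docs-site"})
+CREATE (dev)-[:MAINTAINS {since: 2024}]->(app)
+```
+
+A `MATCH` query can then traverse the `MAINTAINS` relationship directly, without any join logic.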
+Another key difference lies in schema flexibility. SQL databases operate on a rigid schema, meaning any changes to the data structure, such as adding new columns or altering relationships, typically require careful planning and migration processes. Graph databases, however, are schema-optional, allowing for much greater flexibility. New nodes, edges, or properties can be introduced without disrupting existing data, enabling faster adaptation to changing requirements.
+
+## Practical implementation: testing RAG effectiveness
+
+To illustrate the power of RAG systems in practice, let's examine a real-world implementation using Apache NiFi as our subject matter. This case study demonstrates how RAG can enhance an AI's ability to provide accurate, contextual information about specialized technical topics.
+
+### Teaching AI about new technologies
+
+Apache NiFi serves as an excellent example of the limitations of traditional LLMs and how RAG can overcome them. As a relatively recent technology, many LLMs have limited or outdated knowledge about it, making it a perfect candidate for demonstrating RAG's capabilities.
+
+Without RAG, the LLM might provide broad responses or acknowledge its limited knowledge when asked about information that is beyond its cutoff date. The following example demonstrates this.
+
+```text
+Question: What important events happened in 2024?
+RAG: Disabled
+I'm happy to help! Unfortunately, I'm a large language model, I don't have access to real-time information or events that occurred after my training data cutoff in 2024. Therefore, I cannot provide you with any important events that happened in 2024. My apologize for any inconvenience this may cause. Is there anything else I can help you with?
+```
+
+## Setting up the GenAI stack on Linux
+
+To set up and run the GenAI stack on a Linux host, run one of the following sets of commands, depending on whether you want GPU or CPU inference.
+
+### GPU powered
+
+```bash
+git clone https://github.com/docker/genai-stack
+cd genai-stack
+mv env.example .env
+nano .env
+```
+
+In the `.env` file, make sure the following lines are uncommented, and set your own credentials for security:
+
+```txt
+NEO4J_URI=neo4j://database:7687
+NEO4J_USERNAME=neo4j
+NEO4J_PASSWORD=password
+OLLAMA_BASE_URL=http://llm-gpu:11434
+```
+
+Then start the stack:
+
+```bash
+docker compose --profile linux-gpu up -d
+```
+
+### CPU powered
+
+```bash
+git clone https://github.com/docker/genai-stack
+cd genai-stack
+mv env.example .env
+nano .env
+```
+
+In the `.env` file, make sure the following lines are uncommented, and set your own credentials for security:
+
+```txt
+NEO4J_URI=neo4j://database:7687
+NEO4J_USERNAME=neo4j
+NEO4J_PASSWORD=password
+OLLAMA_BASE_URL=http://llm:11434
+```
+
+Then start the stack:
+
+```bash
+docker compose --profile linux up -d
+```
+
+### Setting up on other platforms
+
+For instructions on how to set up the stack on other platforms, refer to the [GenAI Stack repository](https://github.com/docker/genai-stack).
+
+### Initial startup
+
+The first startup might take some time because the system needs to download a large language model.
+
+### Monitoring progress
+
+You can monitor the download and initialization progress by viewing the logs. Run the following command to view the logs:
+
+```bash
+docker compose logs
+```
+
+Wait for lines in the logs indicating that the download is complete and the stack is ready:
+
+```text
+pull-model-1  exited with code 0
+database-1    | 2024-12-29 09:35:53.269+0000 INFO  Started.
+pdf_bot-1     | You can now view your Streamlit app in your browser.
+loader-1      | You can now view your Streamlit app in your browser.
+bot-1         | You can now view your Streamlit app in your browser.
+```
+
+When you see those lines in the logs, the web apps are ready to use. You can now access the interface at [http://localhost:8501/](http://localhost:8501/) and ask questions.
+
+Since the goal is to teach the AI about things it does not yet know, begin by asking it a simple question about NiFi at [http://localhost:8501/](http://localhost:8501/).
+
+![Screenshot showing the GenAI stack chat interface with RAG disabled.](image.png)
+
+```text
+Question: What is Apache Nifi?
+RAG: Disabled
+Hello! I'm here to help you with your question about Apache NiFi. Unfortunately, I don't know the answer to that question. I'm just an AI and my knowledge cutoff is December 2022, so I may not be familiar with the latest technologies or software. Can you please provide more context or details about Apache NiFi? Maybe there's something I can help you with related to it.
+```
+
+As you can see, the AI doesn't know anything about this subject, because it didn't exist during the time of its training, also known as the information cutoff point.
+
+Now it's time to teach the AI some new tricks. First, connect to [http://localhost:8502/](http://localhost:8502/). Instead of using the "neo4j" tag, change it to the "apache-nifi" tag, then select the **Import** button.
+
+![Screenshot showing the loader app importing the apache-nifi tag from Stack Overflow.](image-1.png)
+
+After the import succeeds, you can access Neo4j to verify the data.
+
+After logging in to [http://localhost:7474/](http://localhost:7474/) using the credentials from the `.env` file, you can run queries on Neo4j. Using the Neo4j Cypher query language, you can check the data stored in the database.
+
+To count the data, run the following query:
+
+```text
+MATCH (n)
+RETURN DISTINCT labels(n) AS NodeTypes, count(*) AS Count
+ORDER BY Count DESC;
+```
+
+To run this query, enter it in the box at the top and select the blue run button.
+
+![Screenshot showing a Cypher query and its results in the Neo4j browser.](image-2.png)
+
+The results appear below the query. What you are seeing is the information the system downloaded from Stack Overflow and saved in the graph database. RAG uses this information to enhance its responses.
+
+You can also run the following query to visualize the data:
+
+```text
+CALL db.schema.visualization()
+```
+
+To check the relationships in the database, run the following query:
+
+```text
+CALL db.relationshipTypes()
+```
+
+Now you are ready to let the LLM use this information. Go back to [http://localhost:8501/](http://localhost:8501/), select the **RAG** checkbox, and ask the same question again. The LLM now provides a more detailed answer.
+
+![Screenshot showing the chat interface answering the NiFi question with RAG enabled.](image-3.png)
+
+The system delivers comprehensive, accurate information by pulling from current technical documentation.
+
+```text
+Question: What is Apache Nifi?
+RAG: Enabled
+
+Answer:
+Apache NiFi is an open-source, distributed data processing system that allows you to collect, transform, and transport data across various sources and sinks. It provides a graphical user interface for designing and managing data flows, as well as a set of APIs for integrating with other systems.
+According to this Stack Overflow question, the free Neo4J JDBC driver doesn't understand SQL, only Cypher, so you would need to use a non-free BI Connector to connect to Neo4j from Apache Drill.
+Additionally, this Stack Overflow question suggests that Apache POI does work in a Neo4j User Defined Function.
+However, there may be an unknown glitch in the standalone UDF mentioned in the initial question.
+To export data from Neo4j to Excel using APOC Extended procedures, you can use apoc.export.xls.query which takes a Cypher query and exports the results to an Excel file.
+
+References:
+How to connect and query Neo4j Database on Apache Drill?
+Is a Neo4j UDF compatible with Apache POI?
+```
+
+Keep in mind that new questions will be added to Stack Overflow, and due to the inherent randomness in most AI models, the answers may vary and won't be identical to those in this example.
+
+Feel free to start over with another [Stack Overflow tag](https://stackoverflow.com/tags). To drop all data in Neo4j, you can use the following command in the Neo4j Web UI:
+
+```text
+MATCH (n)
+DETACH DELETE n;
+```
+
+For optimal results, choose a tag that the LLM is not familiar with.
+
+### When to leverage RAG for optimal results
+
+Retrieval-Augmented Generation (RAG) is particularly effective in scenarios where standard Large Language Models (LLMs) fall short. The three key areas where RAG excels are knowledge limitations, business requirements, and cost efficiency. The following sections explore these aspects in more detail.
+
+#### Overcoming knowledge limitations
+
+LLMs are trained on a fixed dataset up until a certain point in time. This means they lack access to:
+
+* Real-time information: LLMs do not continuously update their knowledge, so they might not be aware of recent events, newly released research, or emerging technologies.
+* Specialized knowledge: Many niche subjects, proprietary frameworks, or industry-specific best practices might not be well-documented in the model's training corpus.
+* Accurate contextual understanding: LLMs can struggle with nuances or evolving terminologies that frequently change within dynamic fields like finance, cybersecurity, or medical research.
+
+By incorporating RAG with a graph database such as Neo4j, AI models can access and retrieve the latest, relevant, and highly connected data before generating a response. This ensures that answers are up-to-date and grounded in factual information rather than inferred approximations.
+
+#### Addressing business and compliance needs
+
+Organizations in industries like healthcare, legal services, and financial analysis require their AI-driven solutions to be:
+
+* Accurate: Businesses need AI-generated content that is factual and relevant to their specific domain.
+* Compliant: Many industries must adhere to strict regulations regarding data usage and security.
+* Traceable: Enterprises often require AI responses to be auditable, meaning they need to reference source material.
+
+By using RAG, AI-generated answers can be sourced from trusted databases, ensuring higher accuracy and compliance with industry standards. This mitigates risks such as misinformation or regulatory violations.
+
+#### Enhancing cost efficiency and performance
+
+Training and fine-tuning large AI models can be computationally expensive and time-consuming. However, integrating RAG provides:
+
+* Reduced fine-tuning needs: Instead of retraining an AI model every time new data emerges, RAG allows the model to fetch and incorporate updated information dynamically.
+* Better performance with smaller models: With the right retrieval techniques, even compact AI models can perform well by leveraging external knowledge efficiently.
+* Lower operational costs: Instead of investing in expensive infrastructure to support large-scale retraining, businesses can optimize resources by utilizing RAG's real-time retrieval capabilities.
+
+By following this guide, you now have the foundational knowledge to implement RAG with Neo4j, enabling your AI system to deliver more accurate, relevant, and insightful responses. The next step is experimentation: choose a dataset, configure your stack, and start enhancing your AI with the power of retrieval-augmented generation.
\ No newline at end of file
diff --git a/content/guides/genai-pdf-bot/_index.md b/content/guides/genai-pdf-bot/_index.md
index e30ca5b853b2..41c5e16626e7 100644
--- a/content/guides/genai-pdf-bot/_index.md
+++ b/content/guides/genai-pdf-bot/_index.md
@@ -18,5 +18,3 @@ The generative AI (GenAI) guide teaches you how to containerize an existing GenA
 - Set up a local environment to run the complete GenAI stack locally for development
 
 Start by containerizing an existing GenAI application.
-
-{{< button text="Containerize a GenAI app" url="containerize.md" >}}
diff --git a/content/guides/gha.md b/content/guides/gha.md
index 04e7d497e3f1..1512c2eac9aa 100644
--- a/content/guides/gha.md
+++ b/content/guides/gha.md
@@ -51,7 +51,7 @@ that, you must authenticate with your Docker credentials (username and access
 token) as part of the GitHub Actions workflow.
 
 For instructions on how to create a Docker access token, see
-[Create and manage access tokens](/manuals/security/for-developers/access-tokens.md).
+[Create and manage access tokens](/manuals/security/access-tokens.md).
 
 Once you have your Docker credentials ready, add the credentials to your GitHub
 repository so you can use them in GitHub Actions:
diff --git a/content/guides/go-prometheus-monitoring/_index.md b/content/guides/go-prometheus-monitoring/_index.md
new file mode 100644
index 000000000000..99f49630f800
--- /dev/null
+++ b/content/guides/go-prometheus-monitoring/_index.md
@@ -0,0 +1,40 @@
+---
+description: Containerize a Golang application and monitor it with Prometheus and Grafana.
+keywords: golang, prometheus, grafana, monitoring, containerize
+title: Monitor a Golang application with Prometheus and Grafana
+summary: |
+  Learn how to containerize a Golang application and monitor it with Prometheus and Grafana.
+linkTitle: Monitor with Prometheus and Grafana
+languages: [go]
+params:
+  time: 45 minutes
+---
+
+This guide teaches you how to containerize a Golang application and monitor it with Prometheus and Grafana.
+
+> **Acknowledgment**
+>
+> Docker would like to thank [Pradumna Saraf](https://twitter.com/pradumna_saraf) for his contribution to this guide.
+
+## Overview
+
+Monitoring is important to make sure your application is working as intended. One of the most popular monitoring tools is Prometheus, an open-source monitoring and alerting toolkit designed for reliability and scalability. It collects metrics from monitored targets by scraping metrics HTTP endpoints on these targets. To visualize the metrics, you can use Grafana, an open-source platform for monitoring and observability that lets you query, visualize, alert on, and understand your metrics no matter where they are stored.
+
+In this guide, you will create a Golang server with some endpoints to simulate a real-world application. Then you will expose metrics from the server using Prometheus. Finally, you will visualize the metrics using Grafana.
+You will containerize the Golang application, and use Docker Compose to connect all the services: Golang, Prometheus, and Grafana.
+
+## What will you learn?
+
+* Create a Golang application with custom Prometheus metrics.
+* Containerize a Golang application.
+* Use Docker Compose to run multiple services and connect them together to monitor a Golang application with Prometheus and Grafana.
+* Visualize the metrics using Grafana dashboards.
+
+## Prerequisites
+
+- A good understanding of Golang is assumed.
+- You must be familiar with Prometheus and creating dashboards in Grafana.
+- You must have familiarity with Docker concepts like containers, images, and Dockerfiles. If you are new to Docker, you can start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide.
+
+## Next steps
+
+You will create a Golang server and expose metrics using Prometheus.
diff --git a/content/guides/go-prometheus-monitoring/application.md b/content/guides/go-prometheus-monitoring/application.md
new file mode 100644
index 000000000000..9845b9e127e1
--- /dev/null
+++ b/content/guides/go-prometheus-monitoring/application.md
@@ -0,0 +1,250 @@
+---
+title: Building the application
+linkTitle: Understand the application
+weight: 10
+keywords: go, golang, prometheus, grafana, containerize, monitor
+description: Learn how to create a Golang server to register metrics with Prometheus.
+---
+
+## Prerequisites
+
+* You have a [Git client](https://git-scm.com/downloads). The examples in this section use a command-line based Git client, but you can use any client.
+
+You will create a Golang server with some endpoints to simulate a real-world application. Then you will expose metrics from the server using Prometheus.
+
+## Getting the sample application
+
+Clone the sample application to use with this guide. Open a terminal, change
+directory to a directory that you want to work in, and run the following
+command to clone the repository:
+
+```console
+$ git clone https://github.com/dockersamples/go-prometheus-monitoring.git
+```
+
+Once cloned, you will see the following structure inside the `go-prometheus-monitoring` directory:
+
+```text
+go-prometheus-monitoring
+├── CONTRIBUTING.md
+├── Docker
+│   ├── grafana.yml
+│   └── prometheus.yml
+├── dashboard.json
+├── Dockerfile
+├── LICENSE
+├── README.md
+├── compose.yaml
+├── go.mod
+├── go.sum
+└── main.go
+```
+
+- **main.go** - The entry point of the application.
+- **go.mod and go.sum** - Go module files.
+- **Dockerfile** - Dockerfile used to build the Golang app.
+- **Docker/** - Contains the Prometheus and Grafana configuration files.
+- **compose.yaml** - Docker Compose file to launch everything (Golang app, Prometheus, and Grafana).
+- **dashboard.json** - Grafana dashboard configuration file.
+- Other files are for licensing and documentation purposes.
+
+## Understanding the application
+
+The following is the complete logic of the application, which you will find in `main.go`.
+
+```go
+package main
+
+import (
+	"strconv"
+
+	"github.com/gin-gonic/gin"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// Define metrics
+var (
+	HttpRequestTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "api_http_request_total",
+		Help: "Total number of requests processed by the API",
+	}, []string{"path", "status"})
+
+	HttpRequestErrorTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "api_http_request_error_total",
+		Help: "Total number of errors returned by the API",
+	}, []string{"path", "status"})
+)
+
+// Custom registry (without default Go metrics)
+var customRegistry = prometheus.NewRegistry()
+
+// Register metrics with custom registry
+func init() {
+	customRegistry.MustRegister(HttpRequestTotal, HttpRequestErrorTotal)
+}
+
+func main() {
+	router := gin.Default()
+
+	// Register /metrics before middleware
+	router.GET("/metrics", PrometheusHandler())
+
+	router.Use(RequestMetricsMiddleware())
+	router.GET("/health", func(c *gin.Context) {
+		c.JSON(200, gin.H{
+			"message": "Up and running!",
+		})
+	})
+	router.GET("/v1/users", func(c *gin.Context) {
+		c.JSON(200, gin.H{
+			"message": "Hello from /v1/users",
+		})
+	})
+
+	router.Run(":8000")
+}
+
+// Custom metrics handler with custom registry
+func PrometheusHandler() gin.HandlerFunc {
+	h := promhttp.HandlerFor(customRegistry, promhttp.HandlerOpts{})
+	return func(c *gin.Context) {
+		h.ServeHTTP(c.Writer, c.Request)
+	}
+}
+
+// Middleware to record incoming requests metrics
+func RequestMetricsMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		path := c.Request.URL.Path
+		c.Next()
+		status := c.Writer.Status()
+		if status < 400 {
+			HttpRequestTotal.WithLabelValues(path, strconv.Itoa(status)).Inc()
+		} else {
+			HttpRequestErrorTotal.WithLabelValues(path, strconv.Itoa(status)).Inc()
+		}
+	}
+}
+```
+
+In this part of the code, you import the required packages: `gin`, `prometheus`, and `promhttp`. Then you define two metrics: `HttpRequestTotal` and `HttpRequestErrorTotal` are Prometheus counter vectors, and `customRegistry` is a custom registry used to register them. The metric name identifies the metric, and the help string is shown when you query the `/metrics` endpoint. You use a custom registry to avoid the default Go metrics that the Prometheus client registers automatically. The `init` function then registers the metrics with the custom registry.
+
+```go
+import (
+	"strconv"
+
+	"github.com/gin-gonic/gin"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// Define metrics
+var (
+	HttpRequestTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "api_http_request_total",
+		Help: "Total number of requests processed by the API",
+	}, []string{"path", "status"})
+
+	HttpRequestErrorTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: "api_http_request_error_total",
+		Help: "Total number of errors returned by the API",
+	}, []string{"path", "status"})
+)
+
+// Custom registry (without default Go metrics)
+var customRegistry = prometheus.NewRegistry()
+
+// Register metrics with custom registry
+func init() {
+	customRegistry.MustRegister(HttpRequestTotal, HttpRequestErrorTotal)
+}
+```
+
+In the `main` function, you create a new instance of the `gin` framework and define three routes. The health endpoint at `/health` returns the JSON `{"message": "Up and running!"}`, and the `/v1/users` endpoint returns `{"message": "Hello from /v1/users"}`. The third route, `/metrics`, returns the metrics in the Prometheus format. The `RequestMetricsMiddleware` middleware runs for every request made to the API and records request metrics such as paths and status codes. Finally, you run the gin application on port 8000.
+
+```go
+func main() {
+	router := gin.Default()
+
+	// Register /metrics before middleware
+	router.GET("/metrics", PrometheusHandler())
+
+	router.Use(RequestMetricsMiddleware())
+	router.GET("/health", func(c *gin.Context) {
+		c.JSON(200, gin.H{
+			"message": "Up and running!",
+		})
+	})
+	router.GET("/v1/users", func(c *gin.Context) {
+		c.JSON(200, gin.H{
+			"message": "Hello from /v1/users",
+		})
+	})
+
+	router.Run(":8000")
+}
+```
+
+Now comes the middleware function `RequestMetricsMiddleware`. This function is called for every request made to the API. It increments the `HttpRequestTotal` counter (a separate counter per path and status code) if the status code is less than 400. If the status code is 400 or greater, it increments the `HttpRequestErrorTotal` counter (again, per path and status code). The `PrometheusHandler` function is the custom handler for the `/metrics` endpoint. It returns the metrics in the Prometheus format.
+
+```go
+// Custom metrics handler with custom registry
+func PrometheusHandler() gin.HandlerFunc {
+	h := promhttp.HandlerFor(customRegistry, promhttp.HandlerOpts{})
+	return func(c *gin.Context) {
+		h.ServeHTTP(c.Writer, c.Request)
+	}
+}
+
+// Middleware to record incoming requests metrics
+func RequestMetricsMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		path := c.Request.URL.Path
+		c.Next()
+		status := c.Writer.Status()
+		if status < 400 {
+			HttpRequestTotal.WithLabelValues(path, strconv.Itoa(status)).Inc()
+		} else {
+			HttpRequestErrorTotal.WithLabelValues(path, strconv.Itoa(status)).Inc()
+		}
+	}
+}
+```
+
+That's the complete gist of the application. Now it's time to run it and test that it registers metrics correctly.
+
+## Running the application
+
+Make sure you are still inside the `go-prometheus-monitoring` directory in the terminal. First install the dependencies, then build and run the application:
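+
+```console
+$ go mod tidy
+$ go run main.go
+```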
+
+Then visit `http://localhost:8000/health` or `http://localhost:8000/v1/users` in your browser. You should see the output `{"message": "Up and running!"}` or `{"message": "Hello from /v1/users"}`. If you see these responses, your app is up and running.
+
+Now, check your application's metrics by accessing the `/metrics` endpoint.
+Open `http://localhost:8000/metrics` in your browser. You should see output similar to the following.
+
+```text
+# HELP api_http_request_error_total Total number of errors returned by the API
+# TYPE api_http_request_error_total counter
+api_http_request_error_total{path="/",status="404"} 1
+api_http_request_error_total{path="//v1/users",status="404"} 1
+api_http_request_error_total{path="/favicon.ico",status="404"} 1
+# HELP api_http_request_total Total number of requests processed by the API
+# TYPE api_http_request_total counter
+api_http_request_total{path="/health",status="200"} 2
+api_http_request_total{path="/v1/users",status="200"} 1
+```
+
+In the terminal, press `ctrl` + `c` to stop the application.
+
+> [!NOTE]
+> If you don't want to run the application locally and want to run it in a Docker container instead, skip to the next page, where you create a Dockerfile and containerize the application.
+
+## Summary
+
+In this section, you learned how to create a Golang app that registers metrics with Prometheus. By implementing a middleware function, you incremented the counters based on request paths and status codes.
+
+## Next steps
+
+In the next section, you'll learn how to containerize your application.
diff --git a/content/guides/go-prometheus-monitoring/compose.md b/content/guides/go-prometheus-monitoring/compose.md
new file mode 100644
index 000000000000..499e065759f4
--- /dev/null
+++ b/content/guides/go-prometheus-monitoring/compose.md
@@ -0,0 +1,166 @@
+---
+title: Connecting services with Docker Compose
+linkTitle: Connecting services with Docker Compose
+weight: 30
+keywords: go, golang, prometheus, grafana, containerize, monitor
+description: Learn how to connect services with Docker Compose to monitor a Golang application with Prometheus and Grafana.
+---
+
+Now that you have containerized the Golang application, you will use Docker Compose to connect the Golang application, Prometheus, and Grafana services together, so you can monitor the Golang application with Prometheus and Grafana.
+
+## Creating a Docker Compose file
+
+Create a new file named `compose.yaml` in the root directory of your Golang application. The Docker Compose file contains instructions to run multiple services and connect them together.
+
+Here is a Docker Compose file for a project that uses Golang, Prometheus, and Grafana. You will also find this file in the `go-prometheus-monitoring` directory.
+
+```yaml
+services:
+  api:
+    container_name: go-api
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: go-api:latest
+    ports:
+      - 8000:8000
+    networks:
+      - go-network
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 5
+    develop:
+      watch:
+        - path: .
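+          # Any change under this path triggers an image rebuild (Compose Watch)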
          action: rebuild

  prometheus:
    container_name: prometheus
    image: prom/prometheus:v2.55.0
    volumes:
      - ./Docker/prometheus.yml:/etc/prometheus/prometheus.yml
    ports:
      - 9090:9090
    networks:
      - go-network

  grafana:
    container_name: grafana
    image: grafana/grafana:11.3.0
    volumes:
      - ./Docker/grafana.yml:/etc/grafana/provisioning/datasources/datasource.yaml
      - grafana-data:/var/lib/grafana
    ports:
      - 3000:3000
    networks:
      - go-network
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=password

volumes:
  grafana-data:

networks:
  go-network:
    driver: bridge
```

## Understanding the Docker Compose file

The Docker Compose file consists of three services:

- **Golang application service**: This service builds the Golang application using the Dockerfile and runs it in a container. It exposes the application's port `8000` and connects to the `go-network` network. It also defines a `healthcheck` that uses the `curl` command to check the `/health` endpoint of the application. The health check runs every 30 seconds and retries up to five times if it fails. Apart from the health check, the service has a `develop` section that watches for changes in the application's source code and rebuilds the application using the Docker Compose Watch feature.

- **Prometheus service**: This service runs the Prometheus server in a container. It uses the official Prometheus image `prom/prometheus:v2.55.0`. It exposes the Prometheus server on port `9090` and connects to the `go-network` network. It also mounts the `prometheus.yml` file from the `Docker` directory in your project root. The `prometheus.yml` file contains the Prometheus configuration to scrape the metrics from the Golang application. This is how you connect the Prometheus server to the Golang application.

  ```yaml
  global:
    scrape_interval: 10s
    evaluation_interval: 10s

  scrape_configs:
    - job_name: myapp
      static_configs:
        - targets: ["api:8000"]
  ```

  In the `prometheus.yml` file, you define a job named `myapp` to scrape the metrics from the Golang application. The `targets` field specifies the target to scrape metrics from. In this case, it's the Golang application running on port `8000`. `api` is the service name of the Golang application in the Docker Compose file. The Prometheus server scrapes the metrics from the Golang application every 10 seconds.

- **Grafana service**: This service runs the Grafana server in a container. It uses the official Grafana image `grafana/grafana:11.3.0`. It exposes the Grafana server on port `3000` and connects to the `go-network` network. It also mounts the `grafana.yml` file from the `Docker` directory in your project root. The `grafana.yml` file contains the Grafana configuration to add the Prometheus data source. This is how you connect the Grafana server to the Prometheus server. In the environment variables, you set the Grafana admin user and password, which you use to log in to the Grafana dashboard.

  ```yaml
  apiVersion: 1
  datasources:
    - name: Prometheus (Main)
      type: prometheus
      url: http://prometheus:9090
      isDefault: true
  ```

  In the `grafana.yml` file, you define a Prometheus data source named `Prometheus (Main)`.
The `type` field specifies the type of the data source, which is `prometheus`. The `url` field specifies the URL of the Prometheus server to fetch the metrics from. In this case, the URL is `http://prometheus:9090`. `prometheus` is the service name of the Prometheus server in the Docker Compose file. The `isDefault` field specifies whether the data source is the default data source in Grafana.

Apart from the services, the Docker Compose file also defines a volume named `grafana-data` to persist the Grafana data, and a custom network named `go-network` to connect the services. The `driver: bridge` field specifies the network driver to use.

## Building and running the services

Now that you have the Docker Compose file, you can build the services and run them together using Docker Compose.

To build and run the services, run the following command in the terminal:

```console
$ docker compose up
```

The `docker compose up` command builds the services defined in the Docker Compose file and runs them together. You will see output similar to the following in the terminal:

```console
 ✔ Network go-prometheus-monitoring_go-network Created 0.0s
 ✔ Container grafana Created 0.3s
 ✔ Container go-api Created 0.2s
 ✔ Container prometheus Created 0.3s
Attaching to go-api, grafana, prometheus
go-api | [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.
go-api |
go-api | [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production.
go-api | - using env: export GIN_MODE=release
go-api | - using code: gin.SetMode(gin.ReleaseMode)
go-api |
go-api | [GIN-debug] GET /metrics --> main.PrometheusHandler.func1 (3 handlers)
go-api | [GIN-debug] GET /health --> main.main.func1 (4 handlers)
go-api | [GIN-debug] GET /v1/users --> main.main.func2 (4 handlers)
go-api | [GIN-debug] [WARNING] You trusted all proxies, this is NOT safe. We recommend you to set a value.
go-api | Please check https://pkg.go.dev/github.com/gin-gonic/gin#readme-don-t-trust-all-proxies for details.
go-api | [GIN-debug] Listening and serving HTTP on :8000
prometheus | ts=2025-03-15T05:57:06.676Z caller=main.go:627 level=info msg="No time or size retention was set so using the default time retention" duration=15d
prometheus | ts=2025-03-15T05:57:06.678Z caller=main.go:671 level=info msg="Starting Prometheus Server" mode=server version="(version=2.55.0, branch=HEAD, revision=91d80252c3e528728b0f88d254dd720f6be07cb8)"
grafana | logger=settings t=2025-03-15T05:57:06.865335506Z level=info msg="Config overridden from command line" arg="default.log.mode=console"
grafana | logger=settings t=2025-03-15T05:57:06.865337131Z level=info msg="Config overridden from Environment variable" var="GF_PATHS_DATA=/var/lib/grafana"
grafana | logger=ngalert.state.manager t=2025-03-15T05:57:07.088956839Z level=info msg="State
.
.
grafana | logger=plugin.angulardetectorsprovider.dynamic t=2025-03-15T05:57:07.530317298Z level=info msg="Patterns update finished" duration=440.489125ms
```

The services start running, and you can access the Golang application at `http://localhost:8000`, Prometheus at `http://localhost:9090`, and Grafana at `http://localhost:3000`. You can also check the running containers using the `docker ps` command.
```console
$ docker ps
```

## Summary

In this section, you learned how to connect services together using Docker Compose. You created a Docker Compose file to run multiple services together and connect them using networks. You also learned how to build and run the services using Docker Compose.

Related information:

 - [Docker Compose overview](/manuals/compose/_index.md)
 - [Compose file reference](/reference/compose-file/_index.md)

## Next steps

In the next section, you will learn how to develop the Golang application with Docker. You will also learn how to use Docker Compose Watch to rebuild the image whenever you make changes to the code. Lastly, you will test the application and visualize the metrics in Grafana using Prometheus as the data source.
diff --git a/content/guides/go-prometheus-monitoring/containerize.md b/content/guides/go-prometheus-monitoring/containerize.md
new file mode 100644
index 000000000000..a628c380618f
--- /dev/null
+++ b/content/guides/go-prometheus-monitoring/containerize.md
@@ -0,0 +1,103 @@
---
title: Containerize a Golang application
linkTitle: Containerize your app
weight: 20
keywords: go, golang, containerize, initialize
description: Learn how to containerize a Golang application.
---

Containerization helps you bundle the application and its dependencies into a single package called a container. This package runs consistently on any platform, without you having to worry about the environment. In this section, you will learn how to containerize a Golang application using Docker.

To containerize a Golang application, you first need to create a Dockerfile. The Dockerfile contains instructions to build and run the application in a container. When creating a Dockerfile, you can also follow best practices to optimize the image size and make it more secure.

## Creating a Dockerfile

Create a new file named `Dockerfile` in the root directory of your Golang application. The Dockerfile contains instructions to build and run the application in a container.

The following is a Dockerfile for a Golang application. You will also find this file in the `go-prometheus-monitoring` directory.

```dockerfile
# Use the official Golang image as the base
FROM golang:1.24-alpine AS builder

# Set environment variables
ENV CGO_ENABLED=0 \
    GOOS=linux \
    GOARCH=amd64

# Set working directory inside the container
WORKDIR /build

# Copy go.mod and go.sum files for dependency installation
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy the entire application source
COPY . .

# Build the Go binary
RUN go build -o /app .

# Final lightweight stage
FROM alpine:3.21 AS final

# Copy the compiled binary from the builder stage
COPY --from=builder /app /bin/app

# Expose the application's port
EXPOSE 8000

# Run the application
CMD ["/bin/app"]
```

## Understanding the Dockerfile

The Dockerfile consists of two stages:

1. **Build stage**: This stage uses the official Golang image as the base and sets the necessary environment variables. It also sets the working directory inside the container, copies the `go.mod` and `go.sum` files for dependency installation, downloads the dependencies, copies the entire application source, and builds the Go binary.

   You use the `golang:1.24-alpine` image as the base image for the build stage.
The `CGO_ENABLED=0` environment variable disables CGO, which is useful for building static binaries. You also set the `GOOS` and `GOARCH` environment variables to `linux` and `amd64`, respectively, to build the binary for the Linux platform.

2. **Final stage**: This stage uses the official Alpine image as the base and copies the compiled binary from the build stage. It also exposes the application's port and runs the application.

   You use the `alpine:3.21` image as the base image for the final stage. You copy the compiled binary from the build stage to the final image. You expose the application's port using the `EXPOSE` instruction and run the application using the `CMD` instruction.

   Apart from the multi-stage build, the Dockerfile also follows best practices such as using the official images, setting the working directory, and copying only the necessary files to the final image. You can further optimize the Dockerfile by following other best practices.

## Build the Docker image and run the application

Once you have the Dockerfile, you can build the Docker image and run the application in a container.

To build the Docker image, run the following command in the terminal:

```console
$ docker build -t go-api:latest .
```

After building the image, you can run the application in a container using the following command:

```console
$ docker run -p 8000:8000 go-api:latest
```

The application starts running inside the container, and you can access it at `http://localhost:8000`. You can also check the running containers using the `docker ps` command.

```console
$ docker ps
```

## Summary

In this section, you learned how to containerize a Golang application using a Dockerfile. You created a multi-stage Dockerfile to build and run the application in a container. You also learned about best practices to optimize the Docker image size and make it more secure.

Related information:

 - [Dockerfile reference](/reference/dockerfile.md)
 - [.dockerignore file](/reference/dockerfile.md#dockerignore-file)

## Next steps

In the next section, you will learn how to use Docker Compose to connect and run multiple services together to monitor a Golang application with Prometheus and Grafana.
diff --git a/content/guides/go-prometheus-monitoring/develop.md b/content/guides/go-prometheus-monitoring/develop.md
new file mode 100644
index 000000000000..7cf147604f5d
--- /dev/null
+++ b/content/guides/go-prometheus-monitoring/develop.md
@@ -0,0 +1,84 @@
---
title: Developing your application
linkTitle: Develop your app
weight: 40
keywords: go, golang, containerize, initialize
description: Learn how to develop the Golang application with Docker.
---

In the last section, you saw how to connect your services using Docker Compose. In this section, you will learn how to develop the Golang application with Docker. You will also see how to use Docker Compose Watch to rebuild the image whenever you make changes to the code. Lastly, you will test the application and visualize the metrics in Grafana using Prometheus as the data source.

## Developing the application

If you make changes to your Golang application locally, they need to be reflected in the container. One approach is to use the `--build` flag with Docker Compose after changing the code. This rebuilds all the services that have a `build` instruction in the `compose.yml` file. In your case, that's the `api` service (the Golang application).
```console
$ docker compose up --build
```

However, this approach isn't efficient. Every time you change the code, you have to rebuild the image manually, which isn't a good development flow.

A better approach is to use Docker Compose Watch. In the `compose.yml` file, you added a `develop` section under the `api` service. It works like hot reloading: whenever you change the code under the watched `path`, Compose rebuilds the image (or restarts the service, depending on the `action`). This is how you use it:

```yaml {hl_lines="17-20",linenos=true}
services:
  api:
    container_name: go-api
    build:
      context: .
      dockerfile: Dockerfile
    image: go-api:latest
    ports:
      - 8000:8000
    networks:
      - go-network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 5
    develop:
      watch:
        - path: .
          action: rebuild
```

Once you have added the `develop` section in the `compose.yml` file, use the following command to start the development server:

```console
$ docker compose watch
```

Now, if you modify your `main.go` or any other file in the project, the `api` service is rebuilt automatically. You will see output like the following in the terminal:

```text
Rebuilding service(s) ["api"] after changes were detected...
[+] Building 8.1s (15/15) FINISHED docker:desktop-linux
 => [api internal] load build definition from Dockerfile 0.0s
 => => transferring dockerfile: 704B 0.0s
 => [api internal] load metadata for docker.io/library/alpine:3.17 1.1s
 .
 => => exporting manifest list sha256:89ebc86fd51e27c1da440dc20858ff55fe42211a1930c2d51bbdce09f430c7f1 0.0s
 => => naming to docker.io/library/go-api:latest 0.0s
 => => unpacking to docker.io/library/go-api:latest 0.0s
 => [api] resolving provenance for metadata file 0.0s
service(s) ["api"] successfully built
```

## Testing the application

Now that you have your application running, head over to the Grafana dashboard to visualize the metrics you're registering. Open your browser and navigate to `http://localhost:3000`. You will be greeted with the Grafana login page. The login credentials are the ones provided in the Compose file.

Once you're logged in, you can create a new dashboard. While creating the dashboard, you'll notice that the default data source is `Prometheus`. This is because you already configured the data source in the `grafana.yml` file.

![Screenshot showing a new Grafana dashboard with Prometheus as the default data source.](../images/grafana-dash.png)

You can use different panels to visualize the metrics. This guide doesn't go into the details of Grafana. Refer to the [Grafana documentation](https://grafana.com/docs/grafana/latest/) for more information. The example uses a Bar Gauge panel to visualize the total number of requests from different endpoints, based on the `api_http_request_total` and `api_http_request_error_total` metrics.

![Screenshot showing a Grafana bar gauge panel that visualizes the total number of requests per endpoint.](../images/grafana-panel.png)

You created this panel to compare the successful and failed requests from different endpoints. Successful requests show as green bars, and failed requests show as red bars. The panel also shows which endpoint each request came from. If you want to use this panel, you can import the `dashboard.json` file from the repository you cloned.
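If you'd rather build the panel yourself, the queries are plain PromQL over the counters the application exposes. As a minimal sketch (assuming the metric names from this guide), the following queries sum the success and error counters by path:

```promql
sum by (path) (api_http_request_total)
sum by (path) (api_http_request_error_total)
```

Each query goes into its own panel query in Grafana. You can also wrap the counters in `rate()` if you want per-second rates instead of running totals.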
+ +## Summary + +You've come to the end of this guide. You learned how to develop the Golang application with Docker. You also saw how to use Docker Compose Watch to rebuild the image whenever you make changes to the code. Lastly, you tested the application and visualized the metrics in Grafana using Prometheus as the data source. \ No newline at end of file diff --git a/content/guides/golang/configure-ci-cd.md b/content/guides/golang/configure-ci-cd.md index 5ef7d63a19f9..7f3943d319cf 100644 --- a/content/guides/golang/configure-ci-cd.md +++ b/content/guides/golang/configure-ci-cd.md @@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. diff --git a/content/guides/golang/develop.md b/content/guides/golang/develop.md index 53497bc9af60..c76a693f776f 100644 --- a/content/guides/golang/develop.md +++ b/content/guides/golang/develop.md @@ -521,7 +521,7 @@ In this section, you'll create a Docker Compose file to start your `docker-gs-pi ### Configure Docker Compose -In your application's directory, create a new text file named `docker-compose.yml` with the following content. +In your application's directory, create a new text file named `compose.yaml` with the following content. ```yaml version: "3.8" @@ -582,7 +582,7 @@ The exact value doesn't really matter for this example, because you run Cockroac ### Merging Compose files -The file name `docker-compose.yml` is the default file name which `docker compose` command recognizes if no `-f` flag is provided. This means you can have multiple Docker Compose files if your environment has such requirements. Furthermore, Docker Compose files are... composable (pun intended), so multiple files can be specified on the command line to merge parts of the configuration together. The following list is just a few examples of scenarios where such a feature would be very useful: +The file name `compose.yaml` is the default file name which `docker compose` command recognizes if no `-f` flag is provided. This means you can have multiple Docker Compose files if your environment has such requirements. Furthermore, Docker Compose files are... composable (pun intended), so multiple files can be specified on the command line to merge parts of the configuration together. 
The following list is just a few examples of scenarios where such a feature would be very useful: - Using a bind mount for the source code for local development but not when running the CI tests; - Switching between using a pre-built image for the frontend for some API application vs creating a bind mount for source code; @@ -608,7 +608,7 @@ Before you apply changes made to a Compose configuration file, there is an oppor $ docker compose config ``` -When this command is run, Docker Compose reads the file `docker-compose.yml`, parses it into a data structure in memory, validates where possible, and prints back the reconstruction of that configuration file from its internal representation. If this isn't possible due to errors, Docker prints an error message instead. +When this command is run, Docker Compose reads the file `compose.yaml`, parses it into a data structure in memory, validates where possible, and prints back the reconstruction of that configuration file from its internal representation. If this isn't possible due to errors, Docker prints an error message instead. ### Build and run the application using Docker Compose diff --git a/content/guides/images/agentic-ai-app.png b/content/guides/images/agentic-ai-app.png new file mode 100644 index 000000000000..2cde645ffb9a Binary files /dev/null and b/content/guides/images/agentic-ai-app.png differ diff --git a/content/guides/images/agentic-ai-diagram.webp b/content/guides/images/agentic-ai-diagram.webp new file mode 100644 index 000000000000..bc89353fb41f Binary files /dev/null and b/content/guides/images/agentic-ai-diagram.webp differ diff --git a/content/guides/images/grafana-dash.png b/content/guides/images/grafana-dash.png new file mode 100644 index 000000000000..73c55c8ad4d4 Binary files /dev/null and b/content/guides/images/grafana-dash.png differ diff --git a/content/guides/images/grafana-panel.png b/content/guides/images/grafana-panel.png new file mode 100644 index 000000000000..0b04168b0727 Binary files /dev/null and b/content/guides/images/grafana-panel.png differ diff --git a/content/guides/java/configure-ci-cd.md b/content/guides/java/configure-ci-cd.md index 554a02f06702..67a17a28e999 100644 --- a/content/guides/java/configure-ci-cd.md +++ b/content/guides/java/configure-ci-cd.md @@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. diff --git a/content/guides/java/containerize.md b/content/guides/java/containerize.md index aa2e235e5c83..b3cbfff03783 100644 --- a/content/guides/java/containerize.md +++ b/content/guides/java/containerize.md @@ -204,7 +204,7 @@ services: # start the database before your application. 
The `db-data` volume persists the
# database data between container restarts. The `db-password` secret is used
# to set the database password. You must create `db/password.txt` and add
-# a password of your choosing to it before running `docker-compose up`.
+# a password of your choosing to it before running `docker compose up`.
 # depends_on:
 #   db:
 #     condition: service_healthy
diff --git a/content/guides/jupyter.md b/content/guides/jupyter.md
index 3f78c73a7858..9697b3556dbd 100644
--- a/content/guides/jupyter.md
+++ b/content/guides/jupyter.md
@@ -371,7 +371,7 @@ To share your image and data, you'll use [Docker Hub](https://hub.docker.com/).
 4. Verify that you pushed the image to Docker Hub.
    1. Go to [Docker Hub](https://hub.docker.com).
-   2. Select **Repositories**.
+   2. Select **My Hub** > **Repositories**.
    3. View the **Last pushed** time for your repository.

 Other users can now download and run your image using the `docker run` command. They need to replace `YOUR-USER-NAME` with your Docker ID.
@@ -395,7 +395,7 @@ This example uses the Docker Desktop graphical user interface. Alternatively, in
 8. Select **Save**.
 9. Verify that you exported the volume to Docker Hub.
    1. Go to [Docker Hub](https://hub.docker.com).
-   2. Select **Repositories**.
+   2. Select **My Hub** > **Repositories**.
    3. View the **Last pushed** time for your repository.

 Other users can now download and import your volume. To import the volume and then run it with your image:
diff --git a/content/guides/kafka.md b/content/guides/kafka.md
index 9ce2940b1979..78153a04f7ed 100644
--- a/content/guides/kafka.md
+++ b/content/guides/kafka.md
@@ -235,7 +235,7 @@ To add it to your own project (it’s already in the demo application), you only
 ```yaml
 services:
   kafka-ui:
-    image: ghcr.io/kafbat/kafka-ui:latest
+    image: kafbat/kafka-ui:main
     ports:
       - 8080:8080
     environment:
diff --git a/content/guides/language-translation.md b/content/guides/language-translation.md
index 64ad491c935f..751230ce4ade 100644
--- a/content/guides/language-translation.md
+++ b/content/guides/language-translation.md
@@ -315,7 +315,7 @@ To run the application using Docker:
 >
 > For Windows users, you may get an error when running the container. Verify
 > that the line endings in the `entrypoint.sh` are `LF` (`\n`) and not `CRLF` (`\r\n`),
-> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
+> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).

 You will see the following in your console after the container starts.
diff --git a/content/guides/named-entity-recognition.md b/content/guides/named-entity-recognition.md
index 8c5e087c072b..82b4149a9e88 100644
--- a/content/guides/named-entity-recognition.md
+++ b/content/guides/named-entity-recognition.md
@@ -318,7 +318,7 @@ To run the application using Docker:
 >
 > For Windows users, you may get an error when running the container. Verify
 > that the line endings in the `entrypoint.sh` are `LF` (`\n`) and not `CRLF` (`\r\n`),
-> then rebuild the image.
For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
+> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).

 You will see the following in your console after the container starts.
diff --git a/content/guides/nodejs/configure-ci-cd.md b/content/guides/nodejs/configure-ci-cd.md
index b1215aff478c..c951b37b5d7e 100644
--- a/content/guides/nodejs/configure-ci-cd.md
+++ b/content/guides/nodejs/configure-ci-cd.md
@@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your
 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**.

-3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value.
+3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value.

-4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
+4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.

 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`.
diff --git a/content/guides/nodejs/containerize.md b/content/guides/nodejs/containerize.md
index 97d945ca941b..0f54039de933 100644
--- a/content/guides/nodejs/containerize.md
+++ b/content/guides/nodejs/containerize.md
@@ -31,7 +31,7 @@ directory to a directory that you want to work in, and run the following command to clone the repository:

 ```console
-$ git clone https://github.com/docker/docker-nodejs-sample
+$ git clone https://github.com/docker/docker-nodejs-sample && cd docker-nodejs-sample
 ```

 ## Initialize Docker assets
@@ -141,7 +141,7 @@ services:
 # start the database before your application. The `db-data` volume persists the
 # database data between container restarts. The `db-password` secret is used
 # to set the database password. You must create `db/password.txt` and add
-# a password of your choosing to it before running `docker-compose up`.
+# a password of your choosing to it before running `docker compose up`.
 # depends_on:
 #   db:
 #     condition: service_healthy
diff --git a/content/guides/nodejs/develop.md b/content/guides/nodejs/develop.md
index 0d1afc4e69a3..0e892375182c 100644
--- a/content/guides/nodejs/develop.md
+++ b/content/guides/nodejs/develop.md
@@ -58,7 +58,7 @@ You can use containers to set up local services, like a database. In this sectio
 # start the database before your application. The `db-data` volume persists the
 # database data between container restarts. The `db-password` secret is used
 # to set the database password. You must create `db/password.txt` and add
-# a password of your choosing to it before running `docker-compose up`.
+# a password of your choosing to it before running `docker compose up`.

 depends_on:
   db:
@@ -128,7 +128,7 @@ You can use containers to set up local services, like a database.
In this sectio # start the database before your application. The `db-data` volume persists the # database data between container restarts. The `db-password` secret is used # to set the database password. You must create `db/password.txt` and add - # a password of your choosing to it before running `docker-compose up`. + # a password of your choosing to it before running `docker compose up`. depends_on: db: @@ -188,7 +188,7 @@ You can use containers to set up local services, like a database. In this sectio # start the database before your application. The `db-data` volume persists the # database data between container restarts. The `db-password` secret is used # to set the database password. You must create `db/password.txt` and add - # a password of your choosing to it before running `docker-compose up`. + # a password of your choosing to it before running `docker compose up`. depends_on: db: diff --git a/content/guides/orchestration.md b/content/guides/orchestration.md index d63d54e0b5fd..8127e98e611a 100644 --- a/content/guides/orchestration.md +++ b/content/guides/orchestration.md @@ -41,7 +41,7 @@ Docker Desktop sets up Kubernetes for you quickly and easily. Follow the setup a 1. From the Docker Dashboard, navigate to **Settings**, and select the **Kubernetes** tab. -2. Select the checkbox labeled **Enable Kubernetes**, and select **Apply & Restart**. Docker Desktop automatically sets up Kubernetes for you. You'll know that Kubernetes has been successfully enabled when you see a green light beside 'Kubernetes _running_' in **Settings**. +2. Select the checkbox labeled **Enable Kubernetes**, and select **Apply**. Docker Desktop automatically sets up Kubernetes for you. You'll know that Kubernetes has been successfully enabled when you see a green light beside 'Kubernetes _running_' in **Settings**. 3. To confirm that Kubernetes is up and running, create a text file called `pod.yaml` with the following content: @@ -107,7 +107,7 @@ Docker Desktop sets up Kubernetes for you quickly and easily. Follow the setup a 1. From the Docker Dashboard, navigate to **Settings**, and select the **Kubernetes** tab. -2. Select the checkbox labeled **Enable Kubernetes**, and select **Apply & Restart**. Docker Desktop automatically sets up Kubernetes for you. You'll know that Kubernetes has been successfully enabled when you see a green light beside 'Kubernetes _running_' in the **Settings** menu. +2. Select the checkbox labeled **Enable Kubernetes**, and select **Apply**. Docker Desktop automatically sets up Kubernetes for you. You'll know that Kubernetes has been successfully enabled when you see a green light beside 'Kubernetes _running_' in the **Settings** menu. 3. To confirm that Kubernetes is up and running, create a text file called `pod.yaml` with the following content: diff --git a/content/guides/php/configure-ci-cd.md b/content/guides/php/configure-ci-cd.md index ba6319e37678..747aa2339bee 100644 --- a/content/guides/php/configure-ci-cd.md +++ b/content/guides/php/configure-ci-cd.md @@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. 
You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
+4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.

 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`.
diff --git a/content/guides/php/develop.md b/content/guides/php/develop.md
index b503f34b1000..865f2de9f14b 100644
--- a/content/guides/php/develop.md
+++ b/content/guides/php/develop.md
@@ -189,7 +189,7 @@ You can easily add services to your application stack by updating the `compose.y
 Update your `compose.yaml` to add a new service for phpMyAdmin. For more details, see the [phpMyAdmin Official Docker Image](https://hub.docker.com/_/phpmyadmin). The following is the updated `compose.yaml` file.

-```yaml {hl_lines="35-42"}
+```yaml {hl_lines="42-49"}
 services:
   server:
     build:
diff --git a/content/guides/python/_index.md b/content/guides/python/_index.md
index 221c540f1ed4..6489a6d67d14 100644
--- a/content/guides/python/_index.md
+++ b/content/guides/python/_index.md
@@ -15,10 +15,17 @@ params:
   time: 20 minutes
 ---

+> **Acknowledgment**
+>
+> This guide is a community contribution. Docker would like to thank
+> [Esteban Maya](https://www.linkedin.com/in/esteban-x64/) and [Igor Aleksandrov](https://www.linkedin.com/in/igor-aleksandrov/) for their contribution
+> to this guide.
+
 The Python language-specific guide teaches you how to containerize a Python application using Docker. In this guide, you’ll learn how to:

 - Containerize and run a Python application
 - Set up a local environment to develop a Python application using containers
+- Lint, format, and type check a Python application following best practices
 - Configure a CI/CD pipeline for a containerized Python application using GitHub Actions
 - Deploy your containerized Python application locally to Kubernetes to test and debug your deployment
diff --git a/content/guides/python/configure-github-actions.md b/content/guides/python/configure-github-actions.md
new file mode 100644
index 000000000000..b13e2dfbb2e3
--- /dev/null
+++ b/content/guides/python/configure-github-actions.md
@@ -0,0 +1,136 @@
---
title: Automate your builds with GitHub Actions
linkTitle: Automate your builds with GitHub Actions
weight: 40
keywords: ci/cd, github actions, python, flask
description: Learn how to configure CI/CD using GitHub Actions for your Python application.
aliases:
  - /language/python/configure-ci-cd/
  - /guides/language/python/configure-ci-cd/
  - /guides/python/configure-ci-cd/
---

## Prerequisites

Complete all the previous sections of this guide, starting with [Containerize a Python application](containerize.md). You must have a [GitHub](https://github.com/signup) account and a [Docker](https://hub.docker.com/signup) account to complete this section.

If you haven't created a [GitHub repository](https://github.com/new) for your project yet, now is the time to do it. After creating the repository, don't forget to [add a remote](https://docs.github.com/en/get-started/getting-started-with-git/managing-remote-repositories) and ensure you can commit and [push your code](https://docs.github.com/en/get-started/using-git/pushing-commits-to-a-remote-repository#about-git-push) to GitHub.

1. In your project's GitHub repository, open **Settings**, and go to **Secrets and variables** > **Actions**.

2. 
Under the **Variables** tab, create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value.

3. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.

4. Add the PAT as a **Repository secret** in your GitHub repository, with the name
   `DOCKERHUB_TOKEN`.

## Overview

GitHub Actions is a CI/CD (Continuous Integration and Continuous Deployment) automation tool built into GitHub. It lets you define custom workflows for building, testing, and deploying your code when specific events occur (for example, pushing code or creating a pull request). A workflow is a YAML-based automation script that defines a sequence of steps to be executed when triggered. Workflows are stored in the `.github/workflows/` directory of a repository.

In this section, you'll learn how to set up and use GitHub Actions to build your Docker image and push it to Docker Hub. You will complete the following steps:

1. Define the GitHub Actions workflow.
2. Run the workflow.

## 1. Define the GitHub Actions workflow

You can create a GitHub Actions workflow by creating a YAML file in the `.github/workflows/` directory of your repository. To do this, use your favorite text editor or the GitHub web interface.

If you prefer to use the GitHub web interface, follow these steps:

1. Go to your repository on GitHub and then select the **Actions** tab.

2. Select **set up a workflow yourself**.

   This takes you to a page for creating a new GitHub Actions workflow file in
   your repository. By default, the file is created as `.github/workflows/main.yml`. Change its name to `build.yml`.

If you prefer to use your text editor, create a new file named `build.yml` in the `.github/workflows/` directory of your repository.

Add the following content to the file:

```yaml
name: Build and push Docker image

on:
  push:
    branches:
      - main

jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run pre-commit hooks
        run: pre-commit run --all-files

      - name: Run pyright
        run: pyright

  build_and_push:
    runs-on: ubuntu-latest
    steps:
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          push: true
          tags: ${{ vars.DOCKER_USERNAME }}/${{ github.event.repository.name }}:latest
```

Each GitHub Actions workflow includes one or several jobs, and each job consists of steps. Each step can either run a set of commands or use already [existing actions](https://github.com/marketplace?type=actions). The `build_and_push` job above has three steps:

1. [**Login to Docker Hub**](https://github.com/docker/login-action): This action logs in to Docker Hub using the Docker ID and Personal Access Token (PAT) you created earlier.

2. 
[**Set up Docker Buildx**](https://github.com/docker/setup-buildx-action): This action sets up Docker [Buildx](https://github.com/docker/buildx), a CLI plugin that extends the capabilities of the Docker CLI.

3. [**Build and push**](https://github.com/docker/build-push-action): This action builds and pushes the Docker image to Docker Hub. The `tags` parameter specifies the image name and tag. The `latest` tag is used in this example.

## 2. Run the workflow

Let's commit the changes and push them to the `main` branch. In the workflow above, the trigger is set to `push` events on the `main` branch. This means that the workflow runs every time you push changes to the `main` branch. For more information about workflow triggers, see [Events that trigger workflows](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows).

Go to the **Actions** tab of your GitHub repository. It displays the workflow. Selecting the workflow shows you the breakdown of all the steps.

When the workflow is complete, go to your [repositories on Docker Hub](https://hub.docker.com/repositories). If you see the new repository in that list, it means the GitHub Actions workflow successfully pushed the image to Docker Hub.

## Summary

In this section, you learned how to set up a GitHub Actions workflow for your Python application that includes:

- Running pre-commit hooks for linting and formatting
- Static type checking with Pyright
- Building and pushing Docker images

Related information:

- [Introduction to GitHub Actions](/guides/gha.md)
- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md)
- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions)

## Next steps

In the next section, you'll learn how you can develop locally using Kubernetes.

diff --git a/content/guides/python/containerize.md b/content/guides/python/containerize.md
index 74ab2bb9fc94..f53be97a2db9 100644
--- a/content/guides/python/containerize.md
+++ b/content/guides/python/containerize.md
@@ -27,7 +27,7 @@ The sample application uses the popular [FastAPI](https://fastapi.tiangolo.com)

 Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository:

 ```console
-$ git clone https://github.com/estebanx64/python-docker-example
+$ git clone https://github.com/estebanx64/python-docker-example && cd python-docker-example
 ```

 ## Initialize Docker assets
@@ -58,7 +58,7 @@ This utility will walk you through creating the following files with sensible de

 Let's get started!

 ? What application platform does your project use? Python
-? What version of Python do you want to use? 3.11.4
+? What version of Python do you want to use? 3.12
 ? What port do you want your app to listen on? 8000
 ? What is the command to run your app? python3 -m uvicorn app:app --host=0.0.0.0 --port=8000
@@ -139,8 +139,8 @@ Create a file named `Dockerfile` with the following contents.

 # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7

-ARG PYTHON_VERSION=3.11.4
-FROM python:${PYTHON_VERSION}-slim AS base
+ARG PYTHON_VERSION=3.12
+FROM python:${PYTHON_VERSION}-slim

 # Prevents Python from writing pyc files.
 ENV PYTHONDONTWRITEBYTECODE=1
@@ -181,7 +181,7 @@ COPY . .
 EXPOSE 8000

 # Run the application.
-CMD python3 -m uvicorn app:app --host=0.0.0.0 --port=8000
+CMD ["python3", "-m", "uvicorn", "app:app", "--host=0.0.0.0", "--port=8000"]
 ```

 Create a file named `compose.yaml` with the following contents.
@@ -375,5 +375,4 @@ Related information:

 ## Next steps

-In the next section, you'll learn how you can develop your application using
-containers.
+In the next section, you'll take a look at how to set up a local development environment using Docker containers.
diff --git a/content/guides/python/deploy.md b/content/guides/python/deploy.md
index e77e04513583..b92891559397 100644
--- a/content/guides/python/deploy.md
+++ b/content/guides/python/deploy.md
@@ -98,7 +98,7 @@ data:

 In your `python-docker-dev-example` directory, create a file named
 `docker-python-kubernetes.yaml`. Replace `DOCKER_USERNAME/REPO_NAME` with your Docker username and the repository name that you created in [Configure CI/CD for
-your Python application](./configure-ci-cd.md).
+your Python application](./configure-github-actions.md).

 ```yaml
 apiVersion: apps/v1
@@ -158,10 +158,10 @@ In these Kubernetes YAML file, there are various objects, separated by the `---`:
   you'll get just one replica, or copy of your pod. That pod, which is described under `template`, has just one container in it. The container is created from the image built by GitHub Actions in [Configure CI/CD for
-  your Python application](configure-ci-cd.md).
+  your Python application](configure-github-actions.md).
 - A Service, which will define how the ports are mapped in the containers.
 - A PersistentVolumeClaim, to define a storage that will be persistent through restarts for the database.
-- A Secret, Keeping the database password as a example using secret kubernetes resource.
+- A Secret, which keeps the database password, as an example of using the Kubernetes Secret resource.
 - A NodePort service, which will route traffic from port 30001 on your host to port 8001 inside the pods it routes to, allowing you to reach your app from the network.
diff --git a/content/guides/python/develop.md b/content/guides/python/develop.md
index da9bd980e0f7..7a8b5b2bd847 100644
--- a/content/guides/python/develop.md
+++ b/content/guides/python/develop.md
@@ -1,7 +1,7 @@
 ---
 title: Use containers for Python development
 linkTitle: Develop your app
-weight: 20
+weight: 15
 keywords: python, local, development
 description: Learn how to develop your Python application locally.
 aliases:
@@ -51,7 +51,7 @@ You'll need to clone a new repository to get a sample application that includes

    Let's get started!

    ? What application platform does your project use? Python
-   ? What version of Python do you want to use? 3.11.4
+   ? What version of Python do you want to use? 3.12
    ? What port do you want your app to listen on? 8001
    ? What is the command to run your app? python3 -m uvicorn app:app --host=0.0.0.0 --port=8001
@@ -132,8 +132,8 @@ You'll need to clone a new repository to get a sample application that includes

    # Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7

-   ARG PYTHON_VERSION=3.11.4
-   FROM python:${PYTHON_VERSION}-slim as base
+   ARG PYTHON_VERSION=3.12
+   FROM python:${PYTHON_VERSION}-slim

 # Prevents Python from writing pyc files.
 ENV PYTHONDONTWRITEBYTECODE=1
@@ -174,7 +174,7 @@ You'll need to clone a new repository to get a sample application that includes

    EXPOSE 8001

 # Run the application.
- CMD python3 -m uvicorn app:app --host=0.0.0.0 --port=8001
+ CMD ["python3", "-m", "uvicorn", "app:app", "--host=0.0.0.0", "--port=8001"]
 ```

 Create a file named `compose.yaml` with the following contents.
@@ -569,4 +569,4 @@ Related information:

 ## Next steps

-In the next section, you'll take a look at how to set up a CI/CD pipeline using GitHub Actions.
+In the next section, you'll learn how to set up linting, formatting, and type checking to follow best practices in Python apps.
diff --git a/content/guides/python/lint-format-typing.md b/content/guides/python/lint-format-typing.md
new file mode 100644
index 000000000000..a96aead3530d
--- /dev/null
+++ b/content/guides/python/lint-format-typing.md
@@ -0,0 +1,122 @@
---
title: Linting, formatting, and type checking for Python
linkTitle: Linting and typing
weight: 25
keywords: Python, linting, formatting, type checking, ruff, pyright
description: Learn how to set up linting, formatting and type checking for your Python application.
aliases:
  - /language/python/lint-format-typing/
---

## Prerequisites

Complete [Develop your app](develop.md).

## Overview

In this section, you'll learn how to set up code quality tools for your Python application. This includes:

- Linting and formatting with Ruff
- Static type checking with Pyright
- Automating checks with pre-commit hooks

## Linting and formatting with Ruff

Ruff is an extremely fast Python linter and formatter written in Rust. It replaces multiple tools like flake8, isort, and black with a single unified tool.

Create a `pyproject.toml` file:

```toml
[tool.ruff]
target-version = "py312"

[tool.ruff.lint]
select = [
    "E",  # pycodestyle errors
    "W",  # pycodestyle warnings
    "F",  # pyflakes
    "I",  # isort
    "B",  # flake8-bugbear
    "C4",  # flake8-comprehensions
    "UP",  # pyupgrade
    "ARG001",  # unused arguments in functions
]
ignore = [
    "E501",  # line too long, handled by the formatter
    "B008",  # do not perform function calls in argument defaults
    "W191",  # indentation contains tabs
    "B904",  # Allow raising exceptions without from e, for HTTPException
]
```

### Using Ruff

Run these commands to check and format your code:

```bash
# Check for errors
ruff check .

# Automatically fix fixable errors
ruff check --fix .

# Format code
ruff format .
```

## Type checking with Pyright

Pyright is a fast static type checker for Python that works well with modern Python features.

Add the `Pyright` configuration in `pyproject.toml`:

```toml
[tool.pyright]
typeCheckingMode = "strict"
pythonVersion = "3.12"
exclude = [".venv"]
```

### Running Pyright

To check your code for type errors:

```bash
pyright
```

## Setting up pre-commit hooks

Pre-commit hooks automatically run checks before each commit. The following `.pre-commit-config.yaml` file sets up Ruff:

```yaml
repos:
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.2.2
    hooks:
      - id: ruff
        args: [--fix]
      - id: ruff-format
```

To install and use:

```bash
pre-commit install
git commit -m "Test commit" # Automatically runs checks
```

## Summary

In this section, you learned how to:

- Configure and use Ruff for linting and formatting
- Set up Pyright for static type checking
- Automate checks with pre-commit hooks

These tools help maintain code quality and catch errors early in development.
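For example, running the tools against a small module like the following (a hypothetical snippet, not part of the sample application) shows the kind of issues they catch:

```python
import os  # Ruff reports F401: `os` imported but unused


def greet(name: str) -> str:
    return "Hello, " + name


# Pyright in strict mode rejects this call because an int
# isn't assignable to the `name: str` parameter.
greet(42)
```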
+ +## Next steps + +- [Configure GitHub Actions](configure-github-actions.md) to run these checks automatically +- Customize linting rules to match your team's style preferences +- Explore advanced type checking features \ No newline at end of file diff --git a/content/guides/r/configure-ci-cd.md b/content/guides/r/configure-ci-cd.md index 8e2465d2712a..472ec6969866 100644 --- a/content/guides/r/configure-ci-cd.md +++ b/content/guides/r/configure-ci-cd.md @@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**. -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. +3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as a value. -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. +4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name `DOCKERHUB_TOKEN`. diff --git a/content/guides/r/containerize.md b/content/guides/r/containerize.md index 4357e40c794e..14b4a8dd3061 100644 --- a/content/guides/r/containerize.md +++ b/content/guides/r/containerize.md @@ -26,7 +26,7 @@ The sample application uses the popular [Shiny](https://shiny.posit.co/) framewo Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository: ```console -$ git clone https://github.com/mfranzon/r-docker-dev.git +$ git clone https://github.com/mfranzon/r-docker-dev.git && cd r-docker-dev ``` You should now have the following contents in your `r-docker-dev` diff --git a/content/guides/rag-ollama/containerize.md b/content/guides/rag-ollama/containerize.md index 72cf3f6a6eee..402c480af541 100644 --- a/content/guides/rag-ollama/containerize.md +++ b/content/guides/rag-ollama/containerize.md @@ -106,4 +106,3 @@ application using Docker. In the next section, you'll learn how to properly configure the application with your preferred LLM model, completely locally, using Docker. -{{< button text="Develop your application" url="develop.md" >}} diff --git a/content/guides/reactjs/_index.md b/content/guides/reactjs/_index.md new file mode 100644 index 000000000000..10024e063f5e --- /dev/null +++ b/content/guides/reactjs/_index.md @@ -0,0 +1,50 @@ +--- +title: React.js language-specific guide +linkTitle: React.js +description: Containerize and develop React.js apps using Docker +keywords: getting started, React.js, react.js, docker, language, Dockerfile +summary: | + This guide explains how to containerize React.js applications using Docker. +toc_min: 1 +toc_max: 2 +languages: [js] +params: + time: 20 minutes + +--- + +The React.js language-specific guide shows you how to containerize a React.js application using Docker, following best practices for creating efficient, production-ready containers. + +[React.js](https://react.dev/) is a widely used library for building interactive user interfaces. However, managing dependencies, environments, and deployments efficiently can be complex. 
Docker simplifies this process by providing a consistent and containerized environment.

> **Acknowledgment**
>
> Docker extends its sincere gratitude to [Kristiyan Velkov](https://www.linkedin.com/in/kristiyan-velkov-763130b3/) for authoring this guide. As a Docker Captain and experienced Front-end engineer, his expertise in Docker, DevOps, and modern web development has made this resource invaluable for the community, helping developers navigate and optimize their Docker workflows.

---

## What will you learn?

In this guide, you will learn how to:

- Containerize and run a React.js application using Docker.
- Set up a local development environment for React.js inside a container.
- Run tests for your React.js application within a Docker container.
- Configure a CI/CD pipeline using GitHub Actions for your containerized app.
- Deploy the containerized React.js application to a local Kubernetes cluster for testing and debugging.

To begin, you’ll start by containerizing an existing React.js application.

---

## Prerequisites

Before you begin, make sure you're familiar with the following:

- Basic understanding of [JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript) or [TypeScript](https://www.typescriptlang.org/).
- Basic knowledge of [Node.js](https://nodejs.org/en) and [npm](https://docs.npmjs.com/about-npm) for managing dependencies and running scripts.
- Familiarity with [React.js](https://react.dev/) fundamentals.
- Understanding of Docker concepts such as images, containers, and Dockerfiles. If you're new to Docker, start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide.

Once you've completed the React.js getting started modules, you’ll be ready to containerize your own React.js application using the examples and instructions provided in this guide.
diff --git a/content/guides/reactjs/configure-github-actions.md b/content/guides/reactjs/configure-github-actions.md
new file mode 100644
index 000000000000..d83ae8dc8f19
--- /dev/null
+++ b/content/guides/reactjs/configure-github-actions.md
@@ -0,0 +1,321 @@
---
title: Automate your builds with GitHub Actions
linkTitle: Automate your builds with GitHub Actions
weight: 60
keywords: CI/CD, GitHub Actions, React.js, Next.js
description: Learn how to configure CI/CD using GitHub Actions for your React.js application.

---

## Prerequisites

Complete all the previous sections of this guide, starting with [Containerize React.js application](containerize.md).

You must also have:
- A [GitHub](https://github.com/signup) account.
- A [Docker Hub](https://hub.docker.com/signup) account.

---

## Overview

In this section, you'll set up a **CI/CD pipeline** using [GitHub Actions](https://docs.github.com/en/actions) to automatically:

- Build your React.js application inside a Docker container.
- Run tests in a consistent environment.
- Push the production-ready image to [Docker Hub](https://hub.docker.com).

---

## Connect your GitHub repository to Docker Hub

To enable GitHub Actions to build and push Docker images, you’ll securely store your Docker Hub credentials in your new GitHub repository.

### Step 1: Connect your GitHub repository to Docker Hub

1. Create a Personal Access Token (PAT) from [Docker Hub](https://hub.docker.com).
   1. Go to your **Docker Hub account → Account Settings → Security**.
   2. Generate a new Access Token with **Read/Write** permissions.
   3. 
Name it something like `docker-reactjs-sample`. + 4. Copy and save the token — you’ll need it in Step 4. + +2. Create a repository in [Docker Hub](https://hub.docker.com/repositories/) + 1. Go to your **Docker Hub account → Create a repository**. + 2. For the Repository Name, use something descriptive — for example: `reactjs-sample`. + 3. Once created, copy and save the repository name — you’ll need it in Step 4. + +3. Create a new [GitHub repository](https://github.com/new) for your React.js project + +4. Add Docker Hub credentials as GitHub repository secrets + + In your newly created GitHub repository: + + 1. Navigate to: + **Settings → Secrets and variables → Actions → New repository secret**. + + 2. Add the following secrets: + + | Name | Value | + |-------------------|--------------------------------| + | `DOCKER_USERNAME` | Your Docker Hub username | + | `DOCKERHUB_TOKEN` | Your Docker Hub access token (created in Step 1) | + | `DOCKERHUB_PROJECT_NAME` | Your Docker Project Name (created in Step 2) | + + These secrets let GitHub Actions to authenticate securely with Docker Hub during automated workflows. + +5. Connect Your Local Project to GitHub + + Link your local project `docker-reactjs-sample` to the GitHub repository you just created by running the following command from your project root: + + ```console + $ git remote set-url origin https://github.com/{your-username}/{your-repository-name}.git + ``` + + >[!IMPORTANT] + >Replace `{your-username}` and `{your-repository}` with your actual GitHub username and repository name. + + To confirm that your local project is correctly connected to the remote GitHub repository, run: + + ```console + $ git remote -v + ``` + + You should see output similar to: + + ```console + origin https://github.com/{your-username}/{your-repository-name}.git (fetch) + origin https://github.com/{your-username}/{your-repository-name}.git (push) + ``` + + This confirms that your local repository is properly linked and ready to push your source code to GitHub. + +6. Push Your Source Code to GitHub + + Follow these steps to commit and push your local project to your GitHub repository: + + 1. Stage all files for commit. + + ```console + $ git add -A + ``` + This command stages all changes — including new, modified, and deleted files — preparing them for commit. + + + 2. Commit your changes. + + ```console + $ git commit -m "Initial commit" + ``` + This command creates a commit that snapshots the staged changes with a descriptive message. + + 3. Push the code to the `main` branch. + + ```console + $ git push -u origin main + ``` + This command pushes your local commits to the `main` branch of the remote GitHub repository and sets the upstream branch. + +Once completed, your code will be available on GitHub, and any GitHub Actions workflow you’ve configured will run automatically. + +> [!NOTE] +> Learn more about the Git commands used in this step: +> - [Git add](https://git-scm.com/docs/git-add) – Stage changes (new, modified, deleted) for commit +> - [Git commit](https://git-scm.com/docs/git-commit) – Save a snapshot of your staged changes +> - [Git push](https://git-scm.com/docs/git-push) – Upload local commits to your GitHub repository +> - [Git remote](https://git-scm.com/docs/git-remote) – View and manage remote repository URLs + +--- + +### Step 2: Set up the workflow + +Now you'll create a GitHub Actions workflow that builds your Docker image, runs tests, and pushes the image to Docker Hub. + +1. 
Go to your repository on GitHub and select the **Actions** tab in the top menu. + +2. Select **Set up a workflow yourself** when prompted. + + This opens an inline editor to create a new workflow file. By default, it will be saved to: + `.github/workflows/main.yml` + + +3. Add the following workflow configuration to the new file: + +```yaml +name: CI/CD – React.js Application with Docker + +on: + push: + branches: [main] + pull_request: + branches: [main] + types: [opened, synchronize, reopened] + +jobs: + build-test-push: + name: Build, Test and Push Docker Image + runs-on: ubuntu-latest + + steps: + # 1. Checkout source code + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetches full history for better caching/context + + # 2. Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # 3. Cache Docker layers + - name: Cache Docker layers + uses: actions/cache@v4 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: ${{ runner.os }}-buildx- + + # 4. Cache npm dependencies + - name: Cache npm dependencies + uses: actions/cache@v4 + with: + path: ~/.npm + key: ${{ runner.os }}-npm-${{ hashFiles('**/package-lock.json') }} + restore-keys: ${{ runner.os }}-npm- + + # 5. Extract metadata + - name: Extract metadata + id: meta + run: | + echo "REPO_NAME=${GITHUB_REPOSITORY##*/}" >> "$GITHUB_OUTPUT" + echo "SHORT_SHA=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT" + + # 6. Build dev Docker image + - name: Build Docker image for tests + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile.dev + tags: ${{ steps.meta.outputs.REPO_NAME }}-dev:latest + load: true # Load to local Docker daemon for testing + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache,mode=max + + # 7. Run Vitest tests + - name: Run Vitest tests and generate report + run: | + docker run --rm \ + --workdir /app \ + --entrypoint "" \ + ${{ steps.meta.outputs.REPO_NAME }}-dev:latest \ + sh -c "npm ci && npx vitest run --reporter=verbose" + env: + CI: true + NODE_ENV: test + timeout-minutes: 10 + + # 8. Login to Docker Hub + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + # 9. Build and push prod image + - name: Build and push production image + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile + push: true + platforms: linux/amd64,linux/arm64 + tags: | + ${{ secrets.DOCKER_USERNAME }}/${{ secrets.DOCKERHUB_PROJECT_NAME }}:latest + ${{ secrets.DOCKER_USERNAME }}/${{ secrets.DOCKERHUB_PROJECT_NAME }}:${{ steps.meta.outputs.SHORT_SHA }} + cache-from: type=local,src=/tmp/.buildx-cache +``` + +This workflow performs the following tasks for your React.js application: +- Triggers on every `push` or `pull request` targeting the `main` branch. +- Builds a development Docker image using `Dockerfile.dev`, optimized for testing. +- Executes unit tests using Vitest inside a clean, containerized environment to ensure consistency. +- Halts the workflow immediately if any test fails — enforcing code quality. +- Caches both Docker build layers and npm dependencies for faster CI runs. +- Authenticates securely with Docker Hub using GitHub repository secrets. +- Builds a production-ready image using the `prod` stage in `Dockerfile`. +- Tags and pushes the final image to Docker Hub with both `latest` and short SHA tags for traceability. 
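
The **Extract metadata** step relies on two standard shell parameter expansions to derive values from the variables GitHub provides. The following quick illustration shows what they produce, using made-up values for `GITHUB_REPOSITORY` and `GITHUB_SHA`:

```console
$ GITHUB_REPOSITORY=your-username/docker-reactjs-sample
$ echo "${GITHUB_REPOSITORY##*/}"   # strip everything up to the last slash
docker-reactjs-sample
$ GITHUB_SHA=1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b
$ echo "${GITHUB_SHA::7}"           # first seven characters of the commit SHA
1a2b3c4
```

These two values become the development image tag and the short SHA tag used later in the workflow.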

> [!NOTE]
> For more information about `docker/build-push-action`, refer to the [GitHub Action README](https://github.com/docker/build-push-action/blob/master/README.md).

---

### Step 3: Run the workflow

After you've added your workflow file, it's time to trigger and observe the CI/CD process in action.

1. Commit and push your workflow file

   Select **Commit changes…** in the GitHub editor.

   - This push will automatically trigger the GitHub Actions pipeline.

2. Monitor the workflow execution

   1. Go to the **Actions** tab in your GitHub repository.
   2. Select the workflow run to follow each step: **build**, **test**, and (if successful) **push**.

3. Verify the Docker image on Docker Hub

   - After a successful workflow run, visit your [Docker Hub repositories](https://hub.docker.com/repositories).
   - You should see a new image under your repository with:
     - Repository name: the value of your `DOCKERHUB_PROJECT_NAME` secret.
     - Tags include:
       - `latest` – represents the most recent successful build; ideal for quick testing or deployment.
       - `<SHORT_SHA>` – a unique identifier based on the commit hash, useful for version tracking, rollbacks, and traceability.

> [!TIP]
> To maintain code quality and prevent accidental direct pushes, protect your `main` branch with branch protection rules:
> - Navigate to your **GitHub repo → Settings → Branches**.
> - Under **Branch protection rules**, select **Add rule**.
> - Specify `main` as the branch name.
> - Enable options like:
>   - *Require a pull request before merging*.
>   - *Require status checks to pass before merging*.
>
> This ensures that only tested and reviewed code is merged into the `main` branch.

---

## Summary

In this section, you set up a complete CI/CD pipeline for your containerized React.js application using GitHub Actions.

Here's what you accomplished:

- Created a new GitHub repository specifically for your project.
- Generated a secure Docker Hub access token and added it to GitHub as a secret.
- Defined a GitHub Actions workflow to:
  - Build your application inside a Docker container.
  - Run tests in a consistent, containerized environment.
  - Push a production-ready image to Docker Hub if tests pass.
- Triggered and verified the workflow execution through GitHub Actions.
- Confirmed that your image was successfully published to Docker Hub.

With this setup, your React.js application is now ready for automated testing and deployment across environments — increasing confidence, consistency, and team productivity.

---

## Related resources

Deepen your understanding of automation and best practices for containerized apps:

- [Introduction to GitHub Actions](/guides/gha.md) – Learn how GitHub Actions automate your workflows
- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md) – Set up container builds with GitHub Actions
- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) – Full reference for writing GitHub workflows
- [Compose file reference](/compose/compose-file/) – Full configuration reference for `compose.yaml`
- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Optimize your image for performance and security

---

## Next steps

Next, learn how you can locally test and debug your React.js workloads on Kubernetes before deploying.
This helps you ensure your application behaves as expected in a production-like environment, reducing surprises during deployment.
diff --git a/content/guides/reactjs/containerize.md b/content/guides/reactjs/containerize.md
new file mode 100644
index 000000000000..35330dcad9b8
--- /dev/null
+++ b/content/guides/reactjs/containerize.md
@@ -0,0 +1,472 @@
---
title: Containerize a React.js application
linkTitle: Containerize
weight: 10
keywords: react.js, node, image, initialize, build
description: Learn how to containerize a React.js application with Docker by creating an optimized, production-ready image using best practices for performance, security, and scalability.
---

## Prerequisites

Before you begin, make sure the following tools are installed and available on your system:

- You have installed the latest version of [Docker Desktop](/get-started/get-docker.md).
- You have a [git client](https://git-scm.com/downloads). The examples in this section use a command-line based git client, but you can use any client.

> **New to Docker?**
> Start with the [Docker basics](/get-started/docker-concepts/the-basics/what-is-a-container.md) guide to get familiar with key concepts like images, containers, and Dockerfiles.

---

## Overview

This guide walks you through the complete process of containerizing a React.js application with Docker. You'll learn how to create a production-ready Docker image using best practices that improve performance, security, scalability, and deployment efficiency.

By the end of this guide, you will:

- Containerize a React.js application using Docker.
- Create and optimize a Dockerfile for production builds.
- Use multi-stage builds to minimize image size.
- Serve the application efficiently with a custom NGINX configuration.
- Follow best practices for building secure and maintainable Docker images.

---

## Get the sample application

Clone the sample application to use with this guide. Open a terminal, change
directory to a directory that you want to work in, and run the following command
to clone the git repository:

```console
$ git clone https://github.com/kristiyan-velkov/docker-reactjs-sample
```

---

## Generate a Dockerfile

Docker provides an interactive CLI tool called `docker init` that helps scaffold the necessary configuration files for containerizing your application. This includes generating a `Dockerfile`, `.dockerignore`, `compose.yaml`, and `README.Docker.md`.

To begin, navigate to the root of your project directory:

```console
$ cd docker-reactjs-sample
```

Then run the following command:

```console
$ docker init
```

You'll see output similar to:

```text
Welcome to the Docker Init CLI!

This utility will walk you through creating the following files with sensible defaults for your project:
  - .dockerignore
  - Dockerfile
  - compose.yaml
  - README.Docker.md

Let's get started!
```

The CLI will prompt you with a few questions about your app setup.
For consistency, use the same responses shown in the following table when prompted:

| Question                                                    | Answer          |
|-------------------------------------------------------------|-----------------|
| What application platform does your project use?            | Node            |
| What version of Node do you want to use?                    | 22.14.0-alpine  |
| Which package manager do you want to use?                   | npm             |
| Do you want to run "npm run build" before starting server?  | yes             |
| What directory is your build output to?                     | dist            |
| What command do you want to use to start the app?           | npm run dev     |
| What port does your server listen on?                       | 8080            |

After completion, your project directory will contain the following new files:

```text
├── docker-reactjs-sample/
│ ├── Dockerfile
│ ├── .dockerignore
│ ├── compose.yaml
│ └── README.Docker.md
```

---

## Build the Docker image

The default Dockerfile generated by `docker init` serves as a solid starting point for general Node.js applications. However, React.js is a front-end library that compiles into static assets, so you need to tailor the Dockerfile to how React applications are built and served in a production environment.

### Step 1: Review the generated files

In the following steps, you'll improve the generated Dockerfile and configuration files by following best practices:

- Use multi-stage builds to keep the final image clean and small
- Serve the app using NGINX, a fast and secure web server
- Improve performance and security by only including what's needed

These updates help ensure your app is easy to deploy, fast to load, and production-ready.

> [!NOTE]
> A `Dockerfile` is a plain text file that contains step-by-step instructions to build a Docker image. It automates packaging your application along with its dependencies and runtime environment.
> For full details, see the [Dockerfile reference](/reference/dockerfile/).

### Step 2: Configure the Dockerfile

Copy and replace the contents of your existing `Dockerfile` with the configuration below:

```dockerfile
# =========================================
# Stage 1: Build the React.js Application
# =========================================
ARG NODE_VERSION=22.14.0-alpine
ARG NGINX_VERSION=alpine3.21

# Use a lightweight Node.js image for building (customizable via ARG)
FROM node:${NODE_VERSION} AS builder

# Set the working directory inside the container
WORKDIR /app

# Copy package-related files first to leverage Docker's caching mechanism
COPY package.json package-lock.json ./

# Install project dependencies using npm ci (ensures a clean, reproducible install)
RUN --mount=type=cache,target=/root/.npm npm ci

# Copy the rest of the application source code into the container
COPY . .

# Build the React.js application (outputs to /app/dist)
RUN npm run build

# =========================================
# Stage 2: Prepare Nginx to Serve Static Files
# =========================================

FROM nginxinc/nginx-unprivileged:${NGINX_VERSION} AS runner

# Use a built-in non-root user for security best practices
USER nginx

# Copy custom Nginx config
COPY nginx.conf /etc/nginx/nginx.conf

# Copy the static build output from the build stage to Nginx's default HTML serving directory
COPY --chown=nginx:nginx --from=builder /app/dist /usr/share/nginx/html

# Expose port 8080 to allow HTTP traffic
# Note: The default NGINX container now listens on port 8080 instead of 80
EXPOSE 8080

# Start Nginx directly with custom config
ENTRYPOINT ["nginx", "-c", "/etc/nginx/nginx.conf"]
CMD ["-g", "daemon off;"]
```

### Step 3: Configure the .dockerignore file

The `.dockerignore` file tells Docker which files and folders to exclude when building the image.

> [!NOTE]
> This helps:
> - Reduce image size
> - Speed up the build process
> - Prevent sensitive or unnecessary files (like `.env`, `.git`, or `node_modules`) from being added to the final image.
>
> To learn more, visit the [.dockerignore reference](/reference/dockerfile.md#dockerignore-file).

Copy and replace the contents of your existing `.dockerignore` with the configuration below:

```dockerignore
# Ignore dependencies and build output
node_modules/
dist/
out/
.tmp/
.cache/

# Ignore Vite, Webpack, and React-specific build artifacts
.vite/
.vitepress/
.eslintcache
.npm/
coverage/
jest/
cypress/
cypress/screenshots/
cypress/videos/
reports/

# Ignore environment and config files (sensitive data)
*.env*
*.log

# Ignore TypeScript build artifacts (if using TypeScript)
*.tsbuildinfo

# Ignore lockfiles (optional if using Docker for package installation)
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*

# Ignore local development files
.git/
.gitignore
.vscode/
.idea/
*.swp
.DS_Store
Thumbs.db

# Ignore Docker-related files (to avoid copying unnecessary configs)
Dockerfile
.dockerignore
docker-compose.yml
docker-compose.override.yml

# Ignore build-specific cache files
*.lock
```

### Step 4: Create the nginx.conf file

To serve your React.js application efficiently inside the container, you'll configure NGINX with a custom setup. This configuration is optimized for performance, browser caching, gzip compression, and support for client-side routing.

Create a file named `nginx.conf` in the root of your project directory, and add the following content:

> [!NOTE]
> To learn more about configuring NGINX, see the [official NGINX documentation](https://nginx.org/en/docs/).

```nginx
worker_processes auto;

# Store PID in /tmp (always writable)
pid /tmp/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Disable logging to avoid permission issues
    access_log off;
    error_log /dev/stderr warn;

    # Optimize static file serving
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 1000;

    # Gzip compression for optimized delivery
    gzip on;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml;
    gzip_min_length 256;
    gzip_vary on;

    server {
        listen 8080;
        server_name localhost;

        # Root directory where React.js build files are placed
        root /usr/share/nginx/html;
        index index.html;

        # Handle React.js client-side routing by falling back to index.html
        location / {
            try_files $uri /index.html;
        }

        # Serve static assets with long cache expiration
        location ~* \.(?:ico|css|js|gif|jpe?g|png|woff2?|eot|ttf|svg|map)$ {
            expires 1y;
            access_log off;
            add_header Cache-Control "public, immutable";
        }

        # Cache build output served under /static/ with long expiration
        location /static/ {
            expires 1y;
            add_header Cache-Control "public, immutable";
        }
    }
}
```

### Step 5: Build the React.js application image

With your custom configuration in place, you're now ready to build the Docker image for your React.js application.
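
Before building, you can optionally syntax-check your NGINX configuration by running `nginx -t` in a throwaway container. This is a quick sanity-check sketch, not a required step of this guide. It assumes you run it from the project root where `nginx.conf` lives:

```console
# Mount the config read-only and ask nginx to validate it
$ docker run --rm -v "$(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro" nginx:alpine nginx -t
```

If the configuration is valid, nginx reports that the syntax is ok and the configuration test is successful.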

The updated setup includes:

- Optimized browser caching and gzip compression
- Secure, non-root logging to avoid permission issues
- Support for React client-side routing by redirecting unmatched routes to `index.html`

After completing the previous steps, your project directory should now contain the following files:

```text
├── docker-reactjs-sample/
│ ├── Dockerfile
│ ├── .dockerignore
│ ├── compose.yaml
│ ├── nginx.conf
│ └── README.Docker.md
```

Now that your Dockerfile is configured, you can build the Docker image for your React.js application.

> [!NOTE]
> The `docker build` command packages your application into an image using the instructions in the Dockerfile. It includes all necessary files from the current directory (called the [build context](/build/concepts/context/#what-is-a-build-context)).

Run the following command from the root of your project:

```console
$ docker build --tag docker-reactjs-sample .
```

What this command does:

- Uses the Dockerfile in the current directory (`.`).
- Packages the application and its dependencies into a Docker image.
- Tags the image as `docker-reactjs-sample` so you can reference it later.

### Step 6: View local images

After building your Docker image, you can check which images are available on your local machine using either the Docker CLI or [Docker Desktop](/manuals/desktop/use-desktop/images.md). Since you're already working in the terminal, let's use the Docker CLI.

To list all locally available Docker images, run the following command:

```console
$ docker images
```

Example output:

```shell
REPOSITORY              TAG       IMAGE ID       CREATED          SIZE
docker-reactjs-sample   latest    f39b47a97156   14 seconds ago   75.8MB
```

This output provides key details about your images:

- **Repository** – The name assigned to the image.
- **Tag** – A version label that helps identify different builds (for example, `latest`).
- **Image ID** – A unique identifier for the image.
- **Created** – The timestamp indicating when the image was built.
- **Size** – The total disk space used by the image.

If the build was successful, you should see the `docker-reactjs-sample` image listed.

---

## Run the containerized application

In the previous step, you created a Dockerfile for your React.js application and built a Docker image using the `docker build` command. Now it's time to run that image in a container and verify that your application works as expected.

Inside the `docker-reactjs-sample` directory, run the following command in a
terminal.

```console
$ docker compose up --build
```

Open a browser and view the application at [http://localhost:8080](http://localhost:8080). You should see a simple React.js web application.

Press `ctrl+c` in the terminal to stop your application.

### Run the application in the background

You can run the application detached from the terminal by adding the `-d`
option. Inside the `docker-reactjs-sample` directory, run the following command
in a terminal.

```console
$ docker compose up --build -d
```

Open a browser and view the application at [http://localhost:8080](http://localhost:8080). You should see a simple web application preview.

To confirm that the container is running, use the `docker ps` command:

```console
$ docker ps
```

This will list all active containers along with their ports, names, and status. Look for a container exposing port 8080.

Example output:

```shell
CONTAINER ID   IMAGE                          COMMAND                  CREATED              STATUS              PORTS                    NAMES
88bced6ade95   docker-reactjs-sample-server   "nginx -c /etc/nginx…"   About a minute ago   Up About a minute   0.0.0.0:8080->8080/tcp   docker-reactjs-sample-server-1
```

To stop the application, run:

```console
$ docker compose down
```

> [!NOTE]
> For more information about Compose commands, see the [Compose CLI
> reference](/reference/cli/docker/compose/_index.md).

---

## Summary

In this guide, you learned how to containerize, build, and run a React.js application using Docker. By following best practices, you created a secure, optimized, and production-ready setup.

What you accomplished:

- Initialized your project using `docker init` to scaffold essential Docker configuration files.
- Replaced the default `Dockerfile` with a multi-stage build that compiles the React.js application and serves the static files using Nginx.
- Replaced the default `.dockerignore` file to exclude unnecessary files and keep the image clean and efficient.
- Built your Docker image using `docker build`.
- Ran the container using `docker compose up`, both in the foreground and in detached mode.
- Verified that the app was running by visiting [http://localhost:8080](http://localhost:8080).
- Learned how to stop the containerized application using `docker compose down`.

You now have a fully containerized React.js application that's ready for deployment across any environment with confidence and consistency.

---

## Related resources

Explore official references and best practices to sharpen your Docker workflow:

- [Multi-stage builds](/build/building/multi-stage/) – Learn how to separate build and runtime stages.
- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Write efficient, maintainable, and secure Dockerfiles.
- [Build context in Docker](/build/concepts/context/) – Learn how context affects image builds.
- [`docker init` CLI reference](/reference/cli/docker/init/) – Scaffold Docker assets automatically.
- [`docker build` CLI reference](/reference/cli/docker/build/) – Build Docker images from a Dockerfile.
- [`docker images` CLI reference](/reference/cli/docker/images/) – Manage and inspect local Docker images.
- [`docker compose up` CLI reference](/reference/cli/docker/compose/up/) – Start and run multi-container applications.
- [`docker compose down` CLI reference](/reference/cli/docker/compose/down/) – Stop and remove containers, networks, and volumes.

---

## Next steps

With your React.js application now containerized, you're ready to move on to the next step.

In the next section, you'll learn how to develop your application using Docker containers, enabling a consistent, isolated, and reproducible development environment across any machine.

diff --git a/content/guides/reactjs/deploy.md b/content/guides/reactjs/deploy.md
new file mode 100644
index 000000000000..86d25d3dbf47
--- /dev/null
+++ b/content/guides/reactjs/deploy.md
@@ -0,0 +1,194 @@
---
title: Test your React.js deployment
linkTitle: Test your deployment
weight: 60
keywords: deploy, kubernetes, react, react.js
description: Learn how to deploy locally to test and debug your Kubernetes deployment
---

## Prerequisites

Before you begin, make sure you've completed the following:

- Complete all the previous sections of this guide, starting with [Containerize a React.js application](containerize.md).
- [Enable Kubernetes](/manuals/desktop/features/kubernetes.md#install-and-turn-on-kubernetes) in Docker Desktop.

> **New to Kubernetes?**
> Visit the [Kubernetes basics tutorial](https://kubernetes.io/docs/tutorials/kubernetes-basics/) to get familiar with how clusters, pods, deployments, and services work.

---

## Overview

This section guides you through deploying your containerized React.js application locally using [Docker Desktop's built-in Kubernetes](/manuals/desktop/features/kubernetes.md). Running your app in a local Kubernetes cluster allows you to closely simulate a real production environment, enabling you to test, validate, and debug your workloads with confidence before promoting them to staging or production.

---

## Create a Kubernetes YAML file

Follow these steps to define your deployment configuration:

1. In the root of your project, create a new file named `reactjs-sample-kubernetes.yaml`.

2. Open the file in your IDE or preferred text editor.

3. Add the following configuration, and be sure to replace `{DOCKER_USERNAME}` and `{DOCKERHUB_PROJECT_NAME}` with your actual Docker Hub username and repository name from the previous section, [Automate your builds with GitHub Actions](configure-github-actions.md).

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: reactjs-sample
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: reactjs-sample
  template:
    metadata:
      labels:
        app: reactjs-sample
    spec:
      containers:
        - name: reactjs-container
          image: {DOCKER_USERNAME}/{DOCKERHUB_PROJECT_NAME}:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: reactjs-sample-service
  namespace: default
spec:
  type: NodePort
  selector:
    app: reactjs-sample
  ports:
    - port: 8080
      targetPort: 8080
      nodePort: 30001
```

This manifest defines two key Kubernetes resources, separated by `---`:

- Deployment
  Deploys a single replica of your React.js application inside a pod. The pod uses the Docker image built and pushed by your GitHub Actions CI/CD workflow
  (refer to [Automate your builds with GitHub Actions](configure-github-actions.md)).
  The container listens on port `8080`, which is typically used by [Nginx](https://nginx.org/en/docs/) to serve your production React app.

- Service (NodePort)
  Exposes the deployed pod to your local machine.
  It forwards traffic from port `30001` on your host to port `8080` inside the container.
  This lets you access the application in your browser at [http://localhost:30001](http://localhost:30001).

> [!NOTE]
> To learn more about Kubernetes objects, see the [Kubernetes documentation](https://kubernetes.io/docs/home/).

---

## Deploy and check your application

Follow these steps to deploy your containerized React.js app into a local Kubernetes cluster and verify that it's running correctly.

### Step 1. Apply the Kubernetes configuration

In your terminal, navigate to the directory where your `reactjs-sample-kubernetes.yaml` file is located, then deploy the resources using:

```console
$ kubectl apply -f reactjs-sample-kubernetes.yaml
```

If everything is configured properly, you'll see confirmation that both the Deployment and the Service were created and are now running inside your local cluster:

```shell
deployment.apps/reactjs-sample created
service/reactjs-sample-service created
```

### Step 2. Check the Deployment status

Run the following command to check the status of your deployment:

```console
$ kubectl get deployments
```

You should see an output similar to:

```shell
NAME             READY   UP-TO-DATE   AVAILABLE   AGE
reactjs-sample   1/1     1            1           14s
```

This confirms that your pod is up and running with one replica available.

### Step 3. Verify the Service exposure

Check if the NodePort service is exposing your app to your local machine:

```console
$ kubectl get services
```

You should see something like:

```shell
NAME                     TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
reactjs-sample-service   NodePort   10.100.244.65   <none>        8080:30001/TCP   1m
```

This output confirms that your app is available via NodePort on port `30001`.

### Step 4. Access your app in the browser

Open your browser and navigate to [http://localhost:30001](http://localhost:30001).

You should see your production-ready React.js Sample application running — served by your local Kubernetes cluster.

### Step 5. Clean up Kubernetes resources

Once you're done testing, you can delete the deployment and service using:

```console
$ kubectl delete -f reactjs-sample-kubernetes.yaml
```

Expected output:

```shell
deployment.apps "reactjs-sample" deleted
service "reactjs-sample-service" deleted
```

This ensures your cluster stays clean and ready for the next deployment.

---

## Summary

In this section, you learned how to deploy your React.js application to a local Kubernetes cluster using Docker Desktop. This setup allows you to test and debug your containerized app in a production-like environment before deploying it to the cloud.

What you accomplished:

- Created a Kubernetes Deployment and NodePort Service for your React.js app
- Used `kubectl apply` to deploy the application locally
- Verified the app was running and accessible at `http://localhost:30001`
- Cleaned up your Kubernetes resources after testing

---

## Related resources

Explore official references and best practices to sharpen your Kubernetes deployment workflow:

- [Kubernetes documentation](https://kubernetes.io/docs/home/) – Learn about core concepts, workloads, services, and more.
- [Deploy on Kubernetes with Docker Desktop](/manuals/desktop/features/kubernetes.md) – Use Docker Desktop's built-in Kubernetes support for local testing and development.
- [`kubectl` CLI reference](https://kubernetes.io/docs/reference/kubectl/) – Manage Kubernetes clusters from the command line.
- [Kubernetes Deployment resource](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) – Understand how to manage and scale applications using Deployments.
- [Kubernetes Service resource](https://kubernetes.io/docs/concepts/services-networking/service/) – Learn how to expose your application to internal and external traffic.
\ No newline at end of file
diff --git a/content/guides/reactjs/develop.md b/content/guides/reactjs/develop.md
new file mode 100644
index 000000000000..ea326cec1b7b
--- /dev/null
+++ b/content/guides/reactjs/develop.md
@@ -0,0 +1,206 @@
---
title: Use containers for React.js development
linkTitle: Develop your app
weight: 30
keywords: react.js, development, node
description: Learn how to develop your React.js application locally using containers.
---

## Prerequisites

Complete [Containerize a React.js application](containerize.md).

---

## Overview

In this section, you'll learn how to set up both production and development environments for your containerized React.js application using Docker Compose. This setup allows you to serve a static production build via Nginx and to develop efficiently inside containers using a live-reloading dev server with Compose Watch.

You'll learn how to:

- Configure separate containers for production and development
- Enable automatic file syncing using Compose Watch in development
- Debug and live-preview your changes in real-time without manual rebuilds

---

## Automatically update services (Development Mode)

Use Compose Watch to automatically sync source file changes into your containerized development environment. This provides a seamless, efficient development experience without needing to restart or rebuild containers manually.

### Step 1: Create a development Dockerfile

Create a file named `Dockerfile.dev` in your project root with the following content:

```dockerfile
# =========================================
# Stage 1: Develop the React.js Application
# =========================================
ARG NODE_VERSION=22.14.0-alpine

# Use a lightweight Node.js image for development
FROM node:${NODE_VERSION} AS dev

# Set the working directory inside the container
WORKDIR /app

# Copy package-related files first to leverage Docker's caching mechanism
COPY package.json package-lock.json ./

# Install project dependencies
RUN --mount=type=cache,target=/root/.npm npm install

# Copy the rest of the application source code into the container
COPY . .

# Expose the port used by the Vite development server
EXPOSE 5173

# Default command. You can override it in the compose.yaml file.
CMD ["npm", "run", "dev"]
```

This file sets up a lightweight development environment for your React app using the dev server.

### Step 2: Update your compose.yaml file

Open your `compose.yaml` file and define two services: one for production (`react-prod`) and one for development (`react-dev`).

Here's an example configuration for a React.js application:

```yaml
services:
  react-prod:
    build:
      context: .
      dockerfile: Dockerfile
    image: docker-reactjs-sample
    ports:
      - "8080:8080"

  react-dev:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "5173:5173"
    develop:
      watch:
        - action: sync
          path: .
          target: /app
```

- The `react-prod` service builds and serves your static production app using Nginx.
- The `react-dev` service runs your React development server with live reload and hot module replacement.
- `watch` triggers file sync with Compose Watch.

> [!NOTE]
> For more details, see the official guide: [Use Compose Watch](/manuals/compose/how-tos/file-watch.md).

### Step 3: Update vite.config.ts to ensure it works properly inside Docker

To make Vite's development server work reliably inside Docker, you need to update your `vite.config.ts` file with the correct settings.

Open the `vite.config.ts` file in your project root and update it as follows:

```ts
/// <reference types="vite/client" />

import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

export default defineConfig({
  base: "/",
  plugins: [react()],
  server: {
    host: true,
    port: 5173,
    strictPort: true,
  },
});
```

> [!NOTE]
> The `server` options in `vite.config.ts` are essential for running Vite inside Docker:
> - `host: true` allows the dev server to be accessible from outside the container.
+> - `port: 5173` sets a consistent development port (must match the one exposed in Docker). +> - `strictPort: true` ensures Vite fails clearly if the port is unavailable, rather than switching silently. +> +> For full details, refer to the [Vite server configuration docs](https://vitejs.dev/config/server-options.html). + + +After completing the previous steps, your project directory should now contain the following files: + +```text +├── docker-reactjs-sample/ +│ ├── Dockerfile +│ ├── Dockerfile.dev +│ ├── .dockerignore +│ ├── compose.yaml +│ ├── nginx.conf +│ └── README.Docker.md +``` + +### Step 4: Start Compose Watch + +Run the following command from your project root to start your container in watch mode: + +```console +$ docker compose watch react-dev +``` + +### Step 5: Test Compose Watch with React + +To verify that Compose Watch is working correctly: + +1. Open the `src/App.tsx` file in your text editor. + +2. Locate the following line: + + ```html +

   <h1>Vite + React</h1>
   ```

3. Change it to:

   ```html
   <h1>Hello from Docker Compose Watch</h1>
   ```

4. Save the file.

5. Open your browser at [http://localhost:5173](http://localhost:5173).

You should see the updated text appear instantly, without needing to rebuild the container manually. This confirms that file watching and automatic synchronization are working as expected.

---

## Summary

In this section, you set up a complete development and production workflow for your React.js application using Docker and Docker Compose.

Here's what you achieved:

- Created a `Dockerfile.dev` to streamline local development with hot reloading
- Defined separate `react-dev` and `react-prod` services in your `compose.yaml` file
- Enabled real-time file syncing using Compose Watch for a smoother development experience
- Verified that live updates work seamlessly by modifying and previewing a component

With this setup, you're now equipped to build, run, and iterate on your React.js app entirely within containers—efficiently and consistently across environments.

---

## Related resources

Deepen your knowledge and improve your containerized development workflow with these guides:

- [Using Compose Watch](/manuals/compose/how-tos/file-watch.md) – Automatically sync source changes during development
- [Multi-stage builds](/manuals/build/building/multi-stage.md) – Create efficient, production-ready Docker images
- [Dockerfile best practices](/build/building/best-practices/) – Write clean, secure, and optimized Dockerfiles.
- [Compose file reference](/compose/compose-file/) – Learn the full syntax and options available for configuring services in `compose.yaml`.
- [Docker volumes](/storage/volumes/) – Persist and manage data between container runs

## Next steps

In the next section, you'll learn how to run unit tests for your React.js application inside Docker containers. This ensures consistent testing across all environments and removes dependencies on local machine setup.
diff --git a/content/guides/reactjs/run-tests.md b/content/guides/reactjs/run-tests.md
new file mode 100644
index 000000000000..dea7d484c664
--- /dev/null
+++ b/content/guides/reactjs/run-tests.md
@@ -0,0 +1,180 @@
---
title: Run React.js tests in a container
linkTitle: Run your tests
weight: 40
keywords: react.js, react, test, vitest
description: Learn how to run your React.js tests in a container.
---

## Prerequisites

Complete all the previous sections of this guide, starting with [Containerize a React.js application](containerize.md).

## Overview

Testing is a critical part of the development process. In this section, you'll learn how to:

- Run unit tests using Vitest inside a Docker container.
- Use Docker Compose to run tests in an isolated, reproducible environment.

You'll use [Vitest](https://vitest.dev) — a fast test runner designed for Vite — along with [Testing Library](https://testing-library.com/) for assertions.

---

## Run tests during development

The `docker-reactjs-sample` application includes a sample test file at:

```text
src/App.test.tsx
```

This file uses Vitest and React Testing Library to verify the behavior of the `App` component.
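
Once the tooling from Step 1 below is installed, you can also run this file directly with the Vitest CLI for a quick local check before containerizing the test run. This is a minimal sketch that assumes the project's dependencies are already installed in `node_modules`:

```console
# Run only the sample test file, in non-watch mode
$ npx vitest run src/App.test.tsx
```

The containerized workflow in the following steps produces the same result in a reproducible environment.
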
### Step 1: Install Vitest and React Testing Library

If you haven't already added the necessary testing tools, install them by running:

```console
$ npm install --save-dev vitest @testing-library/react @testing-library/jest-dom jsdom
```

Then, update the scripts section of your `package.json` file to include the following:

```json
"scripts": {
  "test": "vitest run"
}
```

---

### Step 2: Configure Vitest

Update the `vite.config.ts` file in your project root with the following configuration:

```ts {hl_lines="14-18",linenos=true}
/// <reference types="vitest" />

import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";

export default defineConfig({
  base: "/",
  plugins: [react()],
  server: {
    host: true,
    port: 5173,
    strictPort: true,
  },
  test: {
    environment: "jsdom",
    setupFiles: "./src/setupTests.ts",
    globals: true,
  },
});
```

> [!NOTE]
> The `test` options in `vite.config.ts` are essential for reliable testing inside Docker:
> - `environment: "jsdom"` simulates a browser-like environment for rendering and DOM interactions.
> - `setupFiles: "./src/setupTests.ts"` loads global configuration or mocks before each test file (optional but recommended).
> - `globals: true` enables global test functions like `describe`, `it`, and `expect` without importing them.
>
> For more details, see the official [Vitest configuration docs](https://vitest.dev/config/).

### Step 3: Update compose.yaml

Add a new service named `react-test` to your `compose.yaml` file. This service allows you to run your test suite in an isolated containerized environment.

```yaml {hl_lines="22-26",linenos=true}
services:
  react-dev:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "5173:5173"
    develop:
      watch:
        - action: sync
          path: .
          target: /app

  react-prod:
    build:
      context: .
      dockerfile: Dockerfile
    image: docker-reactjs-sample
    ports:
      - "8080:8080"

  react-test:
    build:
      context: .
      dockerfile: Dockerfile.dev
    command: ["npm", "run", "test"]
```

The `react-test` service reuses the same `Dockerfile.dev` used for [development](develop.md) and overrides the default command to run tests with `npm run test`. This setup ensures a consistent test environment that matches your local development configuration.

After completing the previous steps, your project directory should contain the following files:

```text
├── docker-reactjs-sample/
│ ├── Dockerfile
│ ├── Dockerfile.dev
│ ├── .dockerignore
│ ├── compose.yaml
│ ├── nginx.conf
│ └── README.Docker.md
```

### Step 4: Run the tests

To execute your test suite inside the container, run the following command from your project root:

```console
$ docker compose run --rm react-test
```

This command will:

- Start the `react-test` service defined in your `compose.yaml` file.
- Execute the `npm run test` script using the same environment as development.
- Automatically remove the container after the tests complete, because of the `--rm` option (see the [`docker compose run` reference](/engine/reference/commandline/compose_run)).

> [!NOTE]
> For more information about Compose commands, see the [Compose CLI
> reference](/reference/cli/docker/compose/_index.md).

---

## Summary

In this section, you learned how to run unit tests for your React.js application inside a Docker container using Vitest and Docker Compose.

What you accomplished:

- Installed and configured Vitest and React Testing Library for testing React components.
+- Created a `react-test` service in `compose.yaml` to isolate test execution. +- Reused the development `Dockerfile.dev` to ensure consistency between dev and test environments. +- Ran tests inside the container using `docker compose run --rm react-test`. +- Ensured reliable, repeatable testing across environments without relying on local machine setup. + +--- + +## Related resources + +Explore official references and best practices to sharpen your Docker testing workflow: + +- [Dockerfile reference](/reference/dockerfile/) – Understand all Dockerfile instructions and syntax. +- [Best practices for writing Dockerfiles](/develop/develop-images/dockerfile_best-practices/) – Write efficient, maintainable, and secure Dockerfiles. +- [Compose file reference](/compose/compose-file/) – Learn the full syntax and options available for configuring services in `compose.yaml`. +- [`docker compose run` CLI reference](/reference/cli/docker/compose/run/) – Run one-off commands in a service container. +--- + +## Next steps + +Next, you’ll learn how to set up a CI/CD pipeline using GitHub Actions to automatically build and test your React.js application in a containerized environment. This ensures your code is validated on every push or pull request, maintaining consistency and reliability across your development workflow. diff --git a/content/guides/ruby/_index.md b/content/guides/ruby/_index.md index a83da12e1258..5652dc7fe5b2 100644 --- a/content/guides/ruby/_index.md +++ b/content/guides/ruby/_index.md @@ -12,6 +12,7 @@ aliases: - /language/ruby/ - /guides/language/ruby/ languages: [ruby] +tags: [frameworks] params: time: 20 minutes --- @@ -19,8 +20,8 @@ params: The Ruby language-specific guide teaches you how to containerize a Ruby on Rails application using Docker. In this guide, you’ll learn how to: - Containerize and run a Ruby on Rails application +- Configure a GitHub Actions workflow to build and push a Docker image to Docker Hub - Set up a local environment to develop a Ruby on Rails application using containers -- Configure a CI/CD pipeline for a containerized Ruby on Rails application using GitHub Actions - Deploy your containerized Ruby on Rails application locally to Kubernetes to test and debug your deployment Start by containerizing an existing Ruby on Rails application. diff --git a/content/guides/ruby/configure-ci-cd.md b/content/guides/ruby/configure-ci-cd.md deleted file mode 100644 index b4a440519b7b..000000000000 --- a/content/guides/ruby/configure-ci-cd.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Configure CI/CD for your Ruby on Rails application -linkTitle: Configure CI/CD -weight: 40 -keywords: ci/cd, github actions, ruby, flask -description: Learn how to configure CI/CD using GitHub Actions for your Ruby on Rails application. -aliases: - - /language/ruby/configure-ci-cd/ - - /guides/language/ruby/configure-ci-cd/ ---- - -## Prerequisites - -Complete all the previous sections of this guide, starting with [Containerize a Ruby on Rails application](containerize.md). You must have a [GitHub](https://github.com/signup) account and a [Docker](https://hub.docker.com/signup) account to complete this section. - -## Overview - -In this section, you'll learn how to set up and use GitHub Actions to build and test your Docker image as well as push it to Docker Hub. You will complete the following steps: - -1. Create a new repository on GitHub. -2. Define the GitHub Actions workflow. -3. Run the workflow. 
- -## Step one: Create the repository - -Create a GitHub repository, configure the Docker Hub credentials, and push your source code. - -1. [Create a new repository](https://github.com/new) on GitHub. - -2. Open the repository **Settings**, and go to **Secrets and variables** > - **Actions**. - -3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value. - -4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write. - -5. Add the PAT as a **Repository secret** in your GitHub repository, with the name - `DOCKERHUB_TOKEN`. - -6. In your local repository on your machine, run the following command to change - the origin to the repository you just created. Make sure you change - `your-username` to your GitHub username and `your-repository` to the name of - the repository you created. - - ```console - $ git remote set-url origin https://github.com/your-username/your-repository.git - ``` - -7. Run the following commands to stage, commit, and push your local repository to GitHub. - - ```console - $ git add -A - $ git commit -m "my commit" - $ git push -u origin main - ``` - -## Step two: Set up the workflow - -Set up your GitHub Actions workflow for building, testing, and pushing the image -to Docker Hub. - -1. Go to your repository on GitHub and then select the **Actions** tab. - -2. Select **set up a workflow yourself**. - - This takes you to a page for creating a new GitHub actions workflow file in - your repository, under `.github/workflows/main.yml` by default. - -3. In the editor window, copy and paste the following YAML configuration. - - ```yaml - name: ci - - on: - push: - branches: - - main - - jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Build and push - uses: docker/build-push-action@v6 - with: - push: true - tags: ${{ vars.DOCKER_USERNAME }}/${{ github.event.repository.name }}:latest - ``` - - For more information about the YAML syntax for `docker/build-push-action`, - refer to the [GitHub Action README](https://github.com/docker/build-push-action/blob/master/README.md). - -## Step three: Run the workflow - -Save the workflow file and run the job. - -1. Select **Commit changes...** and push the changes to the `main` branch. - - After pushing the commit, the workflow starts automatically. - -2. Go to the **Actions** tab. It displays the workflow. - - Selecting the workflow shows you the breakdown of all the steps. - -3. When the workflow is complete, go to your - [repositories on Docker Hub](https://hub.docker.com/repositories). - - If you see the new repository in that list, it means the GitHub Actions - successfully pushed the image to Docker Hub. - -## Summary - -In this section, you learned how to set up a GitHub Actions workflow for your Ruby on Rails application. 
-
-Related information:
-
-- [Introduction to GitHub Actions](/guides/gha.md)
-- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md)
-- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions)
-
-## Next steps
-
-Next, learn how you can locally test and debug your workloads on Kubernetes before deploying.
diff --git a/content/guides/ruby/configure-github-actions.md b/content/guides/ruby/configure-github-actions.md
new file mode 100644
index 000000000000..5203f158e1ea
--- /dev/null
+++ b/content/guides/ruby/configure-github-actions.md
@@ -0,0 +1,111 @@
---
title: Automate your builds with GitHub Actions
linkTitle: Automate your builds with GitHub Actions
weight: 20
keywords: ci/cd, github actions, ruby, flask
description: Learn how to configure CI/CD using GitHub Actions for your Ruby on Rails application.
aliases:
  - /language/ruby/configure-ci-cd/
  - /guides/language/ruby/configure-ci-cd/
  - /guides/ruby/configure-ci-cd/
---

## Prerequisites

Complete all the previous sections of this guide, starting with [Containerize a Ruby on Rails application](containerize.md). You must have a [GitHub](https://github.com/signup) account and a [Docker](https://hub.docker.com/signup) account to complete this section.

If you haven't created a [GitHub repository](https://github.com/new) for your project yet, create one now. After creating the repository, [add a remote](https://docs.github.com/en/get-started/getting-started-with-git/managing-remote-repositories) and make sure you can commit and [push your code](https://docs.github.com/en/get-started/using-git/pushing-commits-to-a-remote-repository#about-git-push) to GitHub.

1. In your project's GitHub repository, open **Settings**, and go to **Secrets and variables** > **Actions**.

2. Under the **Variables** tab, create a new **Repository variable** named `DOCKER_USERNAME`, with your Docker ID as its value.

3. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.

4. Add the PAT as a **Repository secret** in your GitHub repository, with the name
   `DOCKERHUB_TOKEN`.

## Overview

GitHub Actions is a CI/CD (Continuous Integration and Continuous Deployment) automation tool built into GitHub. It allows you to define custom workflows for building, testing, and deploying your code when specific events occur (for example, pushing code or creating a pull request). A workflow is a YAML-based automation script that defines a sequence of steps to be executed when triggered. Workflows are stored in the `.github/workflows/` directory of a repository.

In this section, you'll learn how to set up and use GitHub Actions to build your Docker image as well as push it to Docker Hub. You will complete the following steps:

1. Define the GitHub Actions workflow.
2. Run the workflow.

## 1. Define the GitHub Actions workflow

You can create a GitHub Actions workflow by adding a YAML file to the `.github/workflows/` directory of your repository, using either your favorite text editor or the GitHub web interface.

If you prefer to use the GitHub web interface, follow these steps:

1. Go to your repository on GitHub and then select the **Actions** tab.

2. Select **set up a workflow yourself**.

   This takes you to a page for creating a new GitHub Actions workflow file in
   your repository. By default, the file is created under `.github/workflows/main.yml`. Change its name to `build.yml`.

If you prefer to use your text editor, create a new file named `build.yml` in the `.github/workflows/` directory of your repository.

Add the following content to the file:

```yaml
name: Build and push Docker image

on:
  push:
    branches:
      - main

jobs:
  build_and_push:
    runs-on: ubuntu-latest
    steps:
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push
        uses: docker/build-push-action@v6
        with:
          push: true
          tags: ${{ vars.DOCKER_USERNAME }}/${{ github.event.repository.name }}:latest
```

Each GitHub Actions workflow includes one or several jobs. Each job consists of steps. Each step can either run a set of commands or use an already [existing action](https://github.com/marketplace?type=actions). The workflow above has three steps:

1. [**Login to Docker Hub**](https://github.com/docker/login-action): Logs in to Docker Hub using the Docker ID and Personal Access Token (PAT) you created earlier.

2. [**Set up Docker Buildx**](https://github.com/docker/setup-buildx-action): Sets up Docker [Buildx](https://github.com/docker/buildx), a CLI plugin that extends the capabilities of the Docker CLI.

3. [**Build and push**](https://github.com/docker/build-push-action): Builds and pushes the Docker image to Docker Hub. The `tags` parameter specifies the image name and tag. The `latest` tag is used in this example.

## 2. Run the workflow

Commit the changes and push them to the `main` branch. In the workflow above, the trigger is set to `push` events on the `main` branch. This means that the workflow will run every time you push changes to the `main` branch. For more information about workflow triggers, see [Events that trigger workflows](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows).

Go to the **Actions** tab of your GitHub repository. It displays the workflow. Selecting the workflow shows you the breakdown of all the steps.

When the workflow is complete, go to your [repositories on Docker Hub](https://hub.docker.com/repositories). If you see the new repository in that list, it means the GitHub Actions workflow successfully pushed the image to Docker Hub.

## Summary

In this section, you learned how to set up a GitHub Actions workflow for your Ruby on Rails application.

Related information:

- [Introduction to GitHub Actions](/guides/gha.md)
- [Docker Build GitHub Actions](/manuals/build/ci/github-actions/_index.md)
- [Workflow syntax for GitHub Actions](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions)

## Next steps

In the next section, you'll learn how you can develop your application using containers.
+ diff --git a/content/guides/ruby/containerize.md b/content/guides/ruby/containerize.md index ed87769c0516..18fbd21eef71 100644 --- a/content/guides/ruby/containerize.md +++ b/content/guides/ruby/containerize.md @@ -24,7 +24,7 @@ Starting from Rails 7.1 [Docker is supported out of the box](https://guides.ruby If you have an existing Rails application, you will need to create the Docker assets manually. Unfortunately `docker init` command does not yet support Rails. This means that if you are working with Rails, you'll need to copy Dockerfile and other related configurations manually from the examples below. -## Initialize Docker assets +## 1. Initialize Docker assets Rails 7.1 generates multistage Dockerfile out of the box, below is an example of such file generated from a Rails template. @@ -232,7 +232,7 @@ To learn more about the files, see the following: - [compose.yaml](/reference/compose-file/_index.md) - [docker-entrypoint](/reference/dockerfile/#entrypoint) -## Run the application +## 2. Run the application To run the application, run the following command in a terminal inside the application's directory. @@ -244,7 +244,7 @@ Open a browser and view the application at [http://localhost:3000](http://localh In the terminal, press `ctrl`+`c` to stop the application. -### Run the application in the background +## 3. Run the application in the background You can run the application detached from the terminal by adding the `-d` option. Inside the `docker-ruby-on-rails` directory, run the following command @@ -278,5 +278,4 @@ Related information: ## Next steps -In the next section, you'll learn how you can develop your application using -containers. +In the next section, you'll take a look at how to set up a CI/CD pipeline using GitHub Actions. \ No newline at end of file diff --git a/content/guides/ruby/deploy.md b/content/guides/ruby/deploy.md index d86ef14bd22c..91d511d2d03d 100644 --- a/content/guides/ruby/deploy.md +++ b/content/guides/ruby/deploy.md @@ -24,7 +24,7 @@ In your `docker-ruby-on-rails` directory, create a file named `docker-ruby-on-rails-kubernetes.yaml`. Open the file in an IDE or text editor and add the following contents. Replace `DOCKER_USERNAME/REPO_NAME` with your Docker username and the name of the repository that you created in [Configure CI/CD for -your Ruby on Rails application](configure-ci-cd.md). +your Ruby on Rails application](configure-github-actions.md). ```yaml apiVersion: apps/v1 @@ -68,7 +68,7 @@ In this Kubernetes YAML file, there are two objects, separated by the `---`: you'll get just one replica, or copy of your pod. That pod, which is described under `template`, has just one container in it. The container is created from the image built by GitHub Actions in [Configure CI/CD for - your Ruby on Rails application](configure-ci-cd.md). + your Ruby on Rails application](configure-github-actions.md). - A NodePort service, which will route traffic from port 30001 on your host to port 8001 inside the pods it routes to, allowing you to reach your app from the network. diff --git a/content/guides/ruby/develop.md b/content/guides/ruby/develop.md index 96aabf844320..7b7b94b4729f 100644 --- a/content/guides/ruby/develop.md +++ b/content/guides/ruby/develop.md @@ -1,7 +1,7 @@ --- title: Use containers for Ruby on Rails development linkTitle: Develop your app -weight: 20 +weight: 40 keywords: ruby, local, development description: Learn how to develop your Ruby on Rails application locally. 
 aliases:
@@ -176,7 +176,7 @@ $ docker compose watch
 
 Any changes to the application's source files on your local machine will now be immediately reflected in the running container.
 
-Open `docker-ruby-on-rails/app/views/whales/index.html.erb` in an IDE or text editor and update the `Whales` string by adding a exclamation marks.
+Open `docker-ruby-on-rails/app/views/whales/index.html.erb` in an IDE or text editor and update the `Whales` string by adding an exclamation mark.
 
 ```diff
 -    <h1>Whales</h1>
 +    <h1>Whales!</h1>
 ```
@@ -200,4 +200,4 @@ Related information:
 
 ## Next steps
 
-In the next section, you'll take a look at how to set up a CI/CD pipeline using GitHub Actions.
+In the next section, you'll learn how you can locally test and debug your workloads on Kubernetes before deploying.
diff --git a/content/guides/rust/build-images.md b/content/guides/rust/build-images.md
index edf2a0b8210d..d2e414274728 100644
--- a/content/guides/rust/build-images.md
+++ b/content/guides/rust/build-images.md
@@ -25,7 +25,7 @@ dependencies, and any other file system objects required.
 Clone the sample application to use with this guide. Open a terminal, change directory to a directory that you want to work in, and run the following command to clone the repository:
 
 ```console
-$ git clone https://github.com/docker/docker-rust-hello
+$ git clone https://github.com/docker/docker-rust-hello && cd docker-rust-hello
 ```
 
 ## Create a Dockerfile for Rust
diff --git a/content/guides/rust/configure-ci-cd.md b/content/guides/rust/configure-ci-cd.md
index 027b0b8dcf4e..5c012a1a3ab4 100644
--- a/content/guides/rust/configure-ci-cd.md
+++ b/content/guides/rust/configure-ci-cd.md
@@ -30,9 +30,9 @@ Create a GitHub repository, configure the Docker Hub credentials, and push your
 2. Open the repository **Settings**, and go to **Secrets and variables** > **Actions**.
 
-3. Create a new **Repository variable** named `DOCKER_USERNAME` and your Docker ID as value.
+3. Create a new **Repository variable** named `DOCKER_USERNAME`, with your Docker ID as the value.
 
-4. Create a new [Personal Access Token (PAT)](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
+4. Create a new [Personal Access Token (PAT)](/manuals/security/access-tokens.md#create-an-access-token) for Docker Hub. You can name this token `docker-tutorial`. Make sure access permissions include Read and Write.
 
 5. Add the PAT as a **Repository secret** in your GitHub repository, with the name
    `DOCKERHUB_TOKEN`.
diff --git a/content/guides/sentiment-analysis.md b/content/guides/sentiment-analysis.md
index ea6f5ab5a413..725899efe9fc 100644
--- a/content/guides/sentiment-analysis.md
+++ b/content/guides/sentiment-analysis.md
@@ -331,7 +331,7 @@ To run the application using Docker:
 >
 > For Windows users, you may get an error when running the container. Verify
 > that the line endings in the `entrypoint.sh` are `LF` (`\n`) and not `CRLF` (`\r\n`),
-> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
+> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
 
 You will see the following in your console after the container starts.
diff --git a/content/guides/swarm-deploy.md b/content/guides/swarm-deploy.md
index 39390a5fe468..ff04acec9554 100644
--- a/content/guides/swarm-deploy.md
+++ b/content/guides/swarm-deploy.md
@@ -13,7 +13,7 @@ params:
   time: 10 minutes
 ---
 
-{{< include "swarm-mode.md" >}}
+{{% include "swarm-mode.md" %}}
 
 ## Prerequisites
 
@@ -35,7 +35,7 @@ Swarm never creates individual containers like you did in the previous step of t
 Now you can write a simple stack file to run and manage your Todo app, the container `getting-started` image created in [Part 2](02_our_app.md) of the tutorial. Place the following in a file called `bb-stack.yaml`:
 
-{{< include "swarm-compose-compat.md" >}}
+{{% include "swarm-compose-compat.md" %}}
 
 ```yaml
 version: "3.7"
diff --git a/content/guides/tensorflowjs.md b/content/guides/tensorflowjs.md
index ecbd89bc6b48..cd988f74aebf 100644
--- a/content/guides/tensorflowjs.md
+++ b/content/guides/tensorflowjs.md
@@ -533,7 +533,7 @@ developer ecosystem. To share your image:
 4. Verify that you pushed the image to Docker Hub.
    1. Go to [Docker Hub](https://hub.docker.com).
-   2. Select **Repositories**.
+   2. Select **My Hub** > **Repositories**.
    3. View the **Last pushed** time for your repository.
 
 Other users can now download and run your image using the `docker run` command. They need to replace `YOUR-USER-NAME` with your Docker ID.
diff --git a/content/guides/testcontainers-cloud/_index.md b/content/guides/testcontainers-cloud/_index.md
index 2d217f2a3eb9..9cd9b4bebfca 100644
--- a/content/guides/testcontainers-cloud/_index.md
+++ b/content/guides/testcontainers-cloud/_index.md
@@ -38,7 +38,7 @@ Testcontainers Cloud is a cloud-based solution designed to streamline and enhanc
 Works well with Docker Desktop, GitHub Actions, Jenkins, Kubernetes, and other CI solutions
 
-Docker Pro, Team, and Business subscriptions come with Testcontainers Cloud runtime minutes, and additional minutes are available via consumption pricing.
+Docker Pro, Team, and Business subscriptions come with Testcontainers Cloud runtime minutes, and additional minutes are available via consumption pricing. Testcontainers Cloud runtime minutes do not roll over month to month.
 
 ## Who’s this for?
diff --git a/content/guides/text-classification.md b/content/guides/text-classification.md
index 6c3e6ee4a8fe..81df374797a9 100644
--- a/content/guides/text-classification.md
+++ b/content/guides/text-classification.md
@@ -390,7 +390,7 @@ To run the application using Docker:
 >
 > For Windows users, you may get an error when running the container. Verify
 > that the line endings in the `entrypoint.sh` are `LF` (`\n`) and not `CRLF` (`\r\n`),
-> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
+> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
 
 You will see the following in your console after the container starts.
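The line-ending notes in these guides tell Windows users to convert `entrypoint.sh` to `LF` endings and rebuild. A minimal sketch of that fix, assuming GNU `sed` and a hypothetical image name `<your-image-name>`:

```console
# Replace CRLF line endings with LF in place (GNU sed; on macOS use sed -i '')
$ sed -i 's/\r$//' entrypoint.sh

# Keep Git from converting the file back to CRLF on checkout
$ git config core.autocrlf input

# Rebuild the image so the corrected script is copied into it
$ docker build -t <your-image-name> .
```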
diff --git a/content/guides/text-summarization.md b/content/guides/text-summarization.md
index ef6cbd290dfb..e4657f072b12 100644
--- a/content/guides/text-summarization.md
+++ b/content/guides/text-summarization.md
@@ -324,7 +324,7 @@ To run the application using Docker:
 >
 > For Windows users, you may get an error when running the container. Verify
 > that the line endings in the `entrypoint.sh` are `LF` (`\n`) and not `CRLF` (`\r\n`),
-> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
+> then rebuild the image. For more details, see [Avoid unexpected syntax errors, use Unix style line endings for files in containers](/desktop/troubleshoot-and-support/troubleshoot/topics/#unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
 
 You will see the following in your console after the container starts.
diff --git a/content/guides/zscaler/index.md b/content/guides/zscaler/index.md
index 56c35463d534..25cbe427f1d9 100644
--- a/content/guides/zscaler/index.md
+++ b/content/guides/zscaler/index.md
@@ -46,7 +46,7 @@ necessary.
 
 If you are not using Zscaler as a system-level proxy, manually configure
 proxy settings in Docker Desktop. Set up proxy settings for all clients in the
-organization using [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md),
+organization using [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md),
 or edit proxy configuration in the Docker Desktop GUI under [**Settings > Resources > Proxies**](/manuals/desktop/settings-and-maintenance/settings.md#proxies).
 
 ## Install root certificates in Docker images
@@ -84,13 +84,13 @@ like this:
 
 ```dockerfile
 FROM debian:bookworm
-COPY zscaler-cert.pem /usr/local/share/ca-certificates/zscaler-cert.pem
+COPY zscaler-root-ca.crt /usr/local/share/ca-certificates/zscaler-root-ca.crt
 RUN apt-get update && \
     apt-get install -y ca-certificates && \
     update-ca-certificates
 ```
 
-Here, `zscaler-cert.pem` is the root certificate, located at the root of the
+Here, `zscaler-root-ca.crt` is the root certificate, located at the root of the
 build context (often within the application's Git repository).
 
 If you use an artifact repository, you can fetch the certificate directly using
@@ -100,7 +100,7 @@ the content digest of the certificate is correct.
 
 ```dockerfile
 FROM debian:bookworm
 ADD --checksum=sha256:24454f830cdb571e2c4ad15481119c43b3cafd48dd869a9b2945d1036d1dc68d \
-    https://artifacts.example/certs/zscaler-cert.pem /usr/local/share/ca-certificates/zscaler-cert.pem
+    https://artifacts.example/certs/zscaler-root-ca.crt /usr/local/share/ca-certificates/zscaler-root-ca.crt
 RUN apt-get update && \
     apt-get install -y ca-certificates && \
     update-ca-certificates
@@ -123,7 +123,7 @@ RUN --mount=target=.
cmake -B output/ FROM debian:bookworm-slim AS final ADD --checksum=sha256:24454f830cdb571e2c4ad15481119c43b3cafd48dd869a9b2945d1036d1dc68d \ - https://artifacts.example/certs/zscaler-cert.pem /usr/local/share/ca-certificates/zscaler-cert.pem + https://artifacts.example/certs/zscaler-root-ca.crt /usr/local/share/ca-certificates/zscaler-root-ca.crt RUN apt-get update && \ apt-get install -y ca-certificates && \ update-ca-certificates diff --git a/content/includes/admin-company-overview.md b/content/includes/admin-company-overview.md deleted file mode 100644 index 9595e947572d..000000000000 --- a/content/includes/admin-company-overview.md +++ /dev/null @@ -1,22 +0,0 @@ -A company provides a single point of visibility across multiple organizations. This view simplifies the management of Docker organizations and settings. Organization owners with a Docker Business subscription can create a company and then manage it through the [Docker Admin Console](https://app.docker.com/admin). - -The following diagram depicts the setup of a company and how it relates to associated organizations. - -![company-hierarchy](/admin/images/docker-admin-structure.webp) - -## Key features - -With a company, administrators can: - -- View and manage all nested organizations and configure settings centrally -- Carefully control access to the company and company settings -- Have up to ten unique users assigned the company owner role -- Configure SSO and SCIM for all nested organizations -- Enforce SSO for all users in the company - -## Prerequisites - -Before you create a company, verify the following: - -- Any organizations you want to add to a company have a Docker Business subscription -- You're an organization owner for your organization and any additional organizations you want to add diff --git a/content/includes/admin-early-access.md b/content/includes/admin-early-access.md deleted file mode 100644 index 4a11e6107dd8..000000000000 --- a/content/includes/admin-early-access.md +++ /dev/null @@ -1,8 +0,0 @@ ---- ---- - -{{% restricted title="Early Access" %}} -The Docker Admin Console is an [early access](/release-lifecycle#early-access-ea) product. - -It's available to all company owners and organization owners. You can still manage organizations in Docker Hub, but the Admin Console includes company-level management and enhanced features for organization management. -{{% /restricted %}} diff --git a/content/includes/admin-org-overview.md b/content/includes/admin-org-overview.md deleted file mode 100644 index 3aff83bc2fea..000000000000 --- a/content/includes/admin-org-overview.md +++ /dev/null @@ -1,14 +0,0 @@ -An organization in Docker is a collection of teams and repositories -that can be managed together. A team is a group of Docker members that belong to an organization. -An organization can have multiple teams. Members don't have to be added to a team to be part of an organization. - -Docker users become members of an organization once they're associated with that organization by an organization owner. An organization owner is a user with administrative access to the organization. - -Owners can invite users, assign them roles, create new teams, and add -members to an existing team using their Docker ID or email address. An organization owner can also add -additional owners to help them manage users, teams, and repositories in the -organization. - -The following diagram depicts the setup of an organization and how it relates to teams. 
Teams are an optional feature that owners can use to group members and assign permissions.
-
-![organization-hierarchy](/admin/images/org-structure.webp)
diff --git a/content/includes/compose-bridge-experimental.md b/content/includes/compose-bridge-experimental.md
deleted file mode 100644
index 472486f27a1d..000000000000
--- a/content/includes/compose-bridge-experimental.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
----
-
-{{% experimental %}}
-Compose Bridge is an [Experimental](/manuals/release-lifecycle.md#experimental) product.
-{{% /experimental %}}
diff --git a/content/includes/compose/profiles.md b/content/includes/compose/profiles.md
index 03802cedba14..b019bde24285 100644
--- a/content/includes/compose/profiles.md
+++ b/content/includes/compose/profiles.md
@@ -1 +1 @@
-Profiles help you adjust your Compose application for different environments or use cases by selectively activating services. Services can be assigned to one or more profiles; unassigned services start by default, while assigned ones only start when their profile is active. This setup means specific services, like those for debugging or development, to be included in a single `compose.yml` file and activated only as needed.
+Profiles help you adjust your Compose application for different environments or use cases by selectively activating services. Services can be assigned to one or more profiles; unassigned services start and stop by default, while assigned ones start and stop only when their profile is active. This setup lets specific services, like those for debugging or development, be included in a single `compose.yml` file and activated only as needed.
diff --git a/content/includes/desktop-linux-launch.md b/content/includes/desktop-linux-launch.md
index 003d5536b9d4..fe90fc1bef5c 100644
--- a/content/includes/desktop-linux-launch.md
+++ b/content/includes/desktop-linux-launch.md
@@ -1,6 +1,6 @@
 To start Docker Desktop for Linux:
 
-1. Open your **Applications** menu in Gnome/KDE Desktop and search for **Docker Desktop**.
+1. Navigate to the Docker Desktop application in your Gnome/KDE Desktop.
 
 2. Select **Docker Desktop** to start Docker.
diff --git a/content/includes/dockerfile-labs-channel.md b/content/includes/dockerfile-labs-channel.md
index 029b25a63503..8a7bca00d15b 100644
--- a/content/includes/dockerfile-labs-channel.md
+++ b/content/includes/dockerfile-labs-channel.md
@@ -1,7 +1,3 @@
----
----
-
-{{% experimental %}}
-The "labs" channel provides early access to Dockerfile features that are not
-yet available in the stable channel.
-{{% /experimental %}}
+> [!IMPORTANT]
+>
+> The labs channel gives you access to experimental Dockerfile features not yet available in the stable channel.
\ No newline at end of file
diff --git a/content/includes/gordondhi.md b/content/includes/gordondhi.md
new file mode 100644
index 000000000000..909be24cd51d
--- /dev/null
+++ b/content/includes/gordondhi.md
@@ -0,0 +1,28 @@
+1. Ensure Gordon is [enabled](/manuals/ai/gordon.md#enable-ask-gordon).
+1. In Gordon's Toolbox, ensure Gordon's [Developer MCP toolkit is enabled](/manuals/ai/gordon/mcp/built-in-tools.md#configuration).
+1. In the terminal, navigate to the directory containing your Dockerfile.
+1. Start a conversation with Gordon:
+   ```bash
+   docker ai
+   ```
+1. Type:
+   ```console
+   "Migrate my dockerfile to DHI"
+   ```
+1. Follow the conversation with Gordon. Gordon edits your Dockerfile, so when
+   it requests access to the filesystem and other resources, type `yes` to allow Gordon to proceed.
+
+   > [!NOTE]
+   > To learn more about Gordon's data retention and the data it
+   > can access, see [Gordon](/manuals/ai/gordon.md#what-data-does-gordon-access).
+
+When the migration is complete, you see a success message:
+
+```text
+The migration to Docker Hardened Images (DHI) is complete. The updated Dockerfile
+successfully builds the image, and no vulnerabilities were detected in the final image.
+The functionality and optimizations of the original Dockerfile have been preserved.
+```
+
+> [!IMPORTANT]
+> As with any AI tool, you must verify Gordon's edits and test your image.
diff --git a/content/includes/hub-limits.md b/content/includes/hub-limits.md
deleted file mode 100644
index 69373200884f..000000000000
--- a/content/includes/hub-limits.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
----
-
-> [!NOTE]
->
-> The Docker Hub plan limits will take effect on March 1, 2025. No charges on
-> Docker Hub image pulls or storage will be incurred between December 10, 2024,
-> and February 28, 2025.
\ No newline at end of file
diff --git a/content/includes/hub-org-management.md b/content/includes/hub-org-management.md
new file mode 100644
index 000000000000..b75bb77bf96d
--- /dev/null
+++ b/content/includes/hub-org-management.md
@@ -0,0 +1,6 @@
+> [!IMPORTANT]
+>
+> Organization management is moving to the Admin Console.
+>
+> Manage members, teams, settings, and activity logs in the Docker Admin Console.
+> Access to these features in Docker Hub will end soon. Explore the [Admin Console](https://app.docker.com/admin).
\ No newline at end of file
diff --git a/content/includes/tax-compliance.md b/content/includes/tax-compliance.md
index fdada055f3c4..7488d95526de 100644
--- a/content/includes/tax-compliance.md
+++ b/content/includes/tax-compliance.md
@@ -1,5 +1,10 @@
 > [!IMPORTANT]
 >
-> Starting July 1, 2024, Docker will begin collecting sales tax on subscription fees in compliance with state regulations for customers in the United States. For our global customers subject to VAT, the implementation will start rolling out on July 1, 2024. Note that while the roll out begins on this date, VAT charges may not apply to all applicable subscriptions immediately.
+> For United States customers, Docker began collecting sales tax on July 1, 2024.
+> For European customers, Docker began collecting VAT on March 1, 2025.
+> For United Kingdom customers, Docker began collecting VAT on May 1, 2025.
 >
-> To ensure that tax assessments are correct, make sure that your [billing information](/billing/details/) and VAT/Tax ID, if applicable, are updated. If you're exempt from sales tax, see [Register a tax certificate](/billing/tax-certificate/).
+> To ensure that tax assessments are correct, make sure that your
+[billing information](/billing/details/) and VAT/Tax ID, if applicable, are
+updated. If you're exempt from sales tax, see
+[Register a tax certificate](/billing/tax-certificate/).
\ No newline at end of file
diff --git a/content/manuals/_index.md b/content/manuals/_index.md
index 31252a51cae6..a0a8ea05ea78 100644
--- a/content/manuals/_index.md
+++ b/content/manuals/_index.md
@@ -10,8 +10,10 @@ params:
   sidebar:
     groups:
       - Open source
+      - AI
      - Products
      - Platform
+      - Enterprise
  notoc: true
  open-source:
  - title: Docker Build
@@ -24,16 +26,46 @@
    link: /engine/
  - title: Docker Compose
    description: Define and run multi-container applications.
-    icon: /assets/icons/Compose.svg
+    icon: /icons/Compose.svg
    link: /compose/
+  - title: Testcontainers
+    description: Run containers programmatically in your preferred programming language.
+    icon: /icons/Testcontainers.svg
+    link: /testcontainers/
+  - title: MCP Gateway
+    description: Manage and secure your AI tools with a single gateway.
+    icon: /icons/toolkit.svg
+    link: /ai/mcp-gateway/
+
+  ai:
+  - title: Ask Gordon
+    description: Streamline your workflow and get the most out of the Docker ecosystem with your personal AI assistant.
+    icon: note_add
+    link: /ai/gordon/
+  - title: Docker Model Runner
+    description: View and manage your local models.
+    icon: view_in_ar
+    link: /ai/model-runner/
+  - title: MCP Catalog and Toolkit
+    description: Augment your AI workflow with MCP servers.
+    icon: /icons/toolkit.svg
+    link: /ai/mcp-catalog-and-toolkit/
  products:
  - title: Docker Desktop
    description: Your command center for container development.
-    icon: /assets/icons/Whale.svg
+    icon: /icons/Whale.svg
    link: /desktop/
+  - title: Docker Hardened Images
+    description: Secure, minimal images for trusted software delivery.
+    icon: /icons/dhi.svg
+    link: /dhi/
+  - title: Docker Offload
+    description: Build and run containers in the cloud.
+    icon: cloud
+    link: /offload/
  - title: Build Cloud
    description: Build your images faster in the cloud.
-    icon: /assets/images/logo-build-cloud.svg
+    icon: /icons/logo-build-cloud.svg
    link: /build-cloud/
  - title: Docker Hub
    description: Discover, share, and integrate container images.
@@ -41,7 +73,7 @@ params:
    link: /docker-hub/
  - title: Docker Scout
    description: Image analysis and policy evaluation.
-    icon: /assets/icons/Scout.svg
+    icon: /icons/Scout.svg
    link: /scout/
  - title: Docker for GitHub Copilot
    description: Integrate Docker's capabilities with GitHub Copilot.
@@ -51,7 +83,7 @@ params:
    description: Customize your Docker Desktop workflow.
    icon: extension
    link: /extensions/
-  - title: Testcontainers cloud
+  - title: Testcontainers Cloud
    description: Run integration tests, with real dependencies, in the cloud.
    icon: package_2
    link: https://testcontainers.com/cloud/docs/
@@ -76,6 +108,11 @@ params:
    description: Commercial use licenses for Docker products.
    icon: card_membership
    link: /subscription/
+  enterprise:
+  - title: Deploy Docker Desktop
+    description: Deploy Docker Desktop at scale within your company.
+    icon: download
+    link: /enterprise/enterprise-deployment/
 ---
 
 This section contains user guides on how to install, set up, configure, and use
@@ -87,6 +124,12 @@ Open source development and containerization technologies.
 
 {{< grid items=open-source >}}
 
+## AI
+
+All the Docker AI tools in one easy-to-access location.
+
+{{< grid items=ai >}}
+
 ## Products
 
 End-to-end developer solutions for innovative teams.
@@ -96,6 +139,12 @@ End-to-end developer solutions for innovative teams.
 ## Platform
 
 Documentation related to the Docker platform, such as administration and
-subscription management for organizations.
+subscription management.
 
 {{< grid items=platform >}}
+
+## Enterprise
+
+Guidance for IT administrators on deploying Docker Desktop at scale, with configuration help for security-related features.
+ +{{< grid items=enterprise >}} \ No newline at end of file diff --git a/content/manuals/accounts/_index.md b/content/manuals/accounts/_index.md index 65797233c44a..108d3e74592c 100644 --- a/content/manuals/accounts/_index.md +++ b/content/manuals/accounts/_index.md @@ -22,11 +22,15 @@ grid: - title: Personal access tokens description: Learn how to create and manage access tokens for your account. icon: password - link: /security/for-developers/access-tokens/ + link: /security/access-tokens/ - title: Set up two-factor authentication description: Add an extra layer of authentication to your Docker account. - link: /security/for-developers/2fa/ + link: /security/2fa/ icon: phonelink_lock +- title: Deactivate an account + description: Learn how to deactivate a Docker user account. + link: /accounts/deactivate-user-account/ + icon: disabled_by_default --- You can create a Docker account to secure a Docker ID, which is a username for your account that lets you access Docker products. You can use your Docker account to sign in to Docker products like Docker Hub, Docker Desktop, or Docker Scout. You can centrally manage your [Docker account settings](https://app.docker.com/settings), as well as account security features, in [Docker Home](https://app.docker.com). diff --git a/content/manuals/accounts/create-account.md b/content/manuals/accounts/create-account.md index 746c7f132777..fda0188df795 100644 --- a/content/manuals/accounts/create-account.md +++ b/content/manuals/accounts/create-account.md @@ -77,7 +77,7 @@ Once you register your Docker ID and verify your email address, you can sign in > When you use the `docker login` command, your credentials are stored in your home directory in `.docker/config.json`. The password is base64-encoded in this file. > -> We recommend using one of the [Docker credential helpers](https://github.com/docker/docker-credential-helpers) for secure storage of passwords. For extra security, you can also use a [personal access token](../security/for-developers/access-tokens.md) to sign in instead, which is still encoded in this file (without a Docker credential helper) but doesn't permit administrator actions (such as changing the password). +> We recommend using one of the [Docker credential helpers](https://github.com/docker/docker-credential-helpers) for secure storage of passwords. For extra security, you can also use a [personal access token](../security/access-tokens.md) to sign in instead, which is still encoded in this file (without a Docker credential helper) but doesn't permit administrator actions (such as changing the password). ### Sign in with your social provider diff --git a/content/manuals/accounts/deactivate-user-account.md b/content/manuals/accounts/deactivate-user-account.md index 8639c5c43c58..604b0f253bf5 100644 --- a/content/manuals/accounts/deactivate-user-account.md +++ b/content/manuals/accounts/deactivate-user-account.md @@ -5,7 +5,7 @@ description: Learn how to deactivate a Docker user account. keywords: Docker Hub, delete, deactivate, account, account management --- -You can deactivate an account at any time. This section describes the prerequisites and steps to deactivate a user account. For information on deactivating an organization, see [Deactivating an organization](../admin/deactivate-account.md). +You can deactivate an account at any time. This section describes the prerequisites and steps to deactivate a user account. 
For information on deactivating an organization, see [Deactivating an organization](../admin/organization/deactivate-account.md). >[!WARNING] > @@ -17,10 +17,10 @@ Before deactivating your Docker account, ensure you meet the following requireme - For owners, you must leave your organization or company before deactivating your Docker account. To do this: - 1. Sign in to the [Docker Admin Console](https://app.docker.com/admin). - 2. Select the organization you need to leave from the **Choose profile** page. - 3. Find your username in the **Members** tab. - 4. Select the **More options** menu and then select **Leave organization**. + 1. Sign in to [Docker Home](https://app.docker.com/admin) and choose + your organization. + 1. Select **Members** and find your username. + 1. Select the **Actions** menu and then select **Leave organization**. - If you are the sole owner of an organization, you must assign the owner role to another member of the organization and then remove yourself from the organization, or deactivate the organization. Similarly, if you are the sole owner of a company, either add someone else as a company owner and then remove yourself, or deactivate the company. @@ -39,7 +39,8 @@ Once you have completed all the previous steps, you can deactivate your account. > This cannot be undone. Be sure you've gathered all the data you need from your account before deactivating it. 1. Sign in to [Docker Home](https://app.docker.com/login). -2. Select your avatar to open the drop-down menu. -3. Select **Account settings**. -4. In the **Account management** section, select **Deactivate account**. -5. To confirm, select **Deactivate account**. +1. Select your avatar to open the drop-down menu. +1. Select **Account settings**. +1. Select **Deactivate**. +1. Select **Deactivate account**. +1. To confirm, select **Deactivate account**. diff --git a/content/manuals/accounts/manage-account.md b/content/manuals/accounts/manage-account.md index 316a8d654e3b..50e4d80f67b1 100644 --- a/content/manuals/accounts/manage-account.md +++ b/content/manuals/accounts/manage-account.md @@ -5,41 +5,47 @@ description: Learn how to manage settings for your Docker account. keywords: accounts, docker ID, account settings, account management, docker home --- -You can centrally manage the settings for your Docker account using Docker Home. Here you can also take administrative actions for your account and manage your account security. +You can centrally manage your Docker account settings using Docker Home. Here +you can also take administrative actions for your account and manage your +account security. > [!TIP] > -> If your account is associated with an organization that enforces single sign-on (SSO), you may not have permissions to update your account settings. You must contact your administrator to update your settings. +> If your account is associated with an organization that enforces single +> sign-on (SSO), you may not have permissions to update your account settings. +> You must contact your administrator to update your settings. ## Update general settings 1. Sign in to your [Docker account](https://app.docker.com/login). -2. In Docker Home, select your avatar in the top-right corner to open the drop-down. -3. Select **Account settings**. +2. Select your avatar in the top-right corner and select **Account settings**. From the Account settings page, you can take any of the following actions. ### Update account information -To update your account information, select the arrow icon. 
You can edit the following settings here:
+Account information is visible on your account profile in Docker Hub. You can
+update the following account information:
 
 - Full name
 - Company
 - Location
 - Website
-- Gravatar email: To add an avatar to your Docker account, create a [Gravatar account](https://gravatar.com/) and create your avatar. Next, add your Gravatar email to your Docker account settings. It may take some time for your avatar to update in Docker.
-
-This information is visible on your account profile in Docker Hub.
+- Gravatar email: To add an avatar to your Docker account, create a
+[Gravatar account](https://gravatar.com/) and create your avatar. Next, add your
+Gravatar email to your Docker account settings. It might take some time for your
+avatar to update in Docker.
 
 Make your changes here, then select **Save** to save your settings.
 
 ### Update email address
 
-To update your email address, select the arrow icon.
+To update your email address, select **Email**:
 
 1. Enter your new email address.
 2. Enter your password to confirm the change.
-3. Select **Send verification email** to send a verification email to your new email address.
+3. Select **Send verification email** to send a verification email to your new
+email address.
 
 Once you verify your email address, your account information will update.
 
@@ -47,22 +53,38 @@
 
 You can change your password by initiating a password reset via email.
 
-To change your password, select **Reset password**.
-
+To change your password, select **Password** and then **Reset password**.
 Follow the instructions in the password reset email.
 
 ## Manage security settings
 
-You can manage the security settings for your account in Docker Home.
+To update your two-factor authentication (2FA) settings, select **2FA**. For
+more information, see
+[Enable two-factor authentication](../security/2fa/_index.md).
 
-For information on two-factor authentication (2FA) for your account, see [Enable two-factor authentication](../security/for-developers/2fa/_index.md) to get started.
+To manage personal access tokens, select **Personal access tokens**.
+For information on personal access tokens, see
+[Create and manage access tokens](../security/access-tokens.md).
 
-For information on personal access tokens, see [Create and manage access tokens](../security/for-developers/access-tokens.md).
+## Manage connected accounts
 
-## Account management
+You can unlink Google or GitHub accounts that are linked to your Docker account
+using the Account settings page:
 
-You can take administrative actions for your account in Docker Home.
+1. Select **Connected accounts**.
+2. Select **Disconnect** on your connected account.
+3. To fully unlink your Docker account, you must also unlink Docker from Google
+or GitHub. See Google or GitHub's documentation for more information:
+   - [Manage connections between your Google Account and third-parties](https://support.google.com/accounts/answer/13533235?hl=en)
+   - [Reviewing and revoking authorization of GitHub Apps](https://docs.github.com/en/apps/using-github-apps/reviewing-and-revoking-authorization-of-github-apps)
+
+## Account management
 
-For more information on converting your account, see [Convert an account into an organization](../admin/organization/convert-account.md).
+To convert your account into an organization, select **Convert**.
+For more information on converting your account, see +[Convert an account into an organization](../admin/organization/convert-account.md). -For information on deactivating your account, see [Deactivating a user account](./deactivate-user-account.md). +To deactivate your account, select **Deactivate**. +For information on deactivating your account, see +[Deactivating a user account](./deactivate-user-account.md). diff --git a/content/manuals/admin/_index.md b/content/manuals/admin/_index.md index 8ea5bd56c7d2..93818c67a78b 100644 --- a/content/manuals/admin/_index.md +++ b/content/manuals/admin/_index.md @@ -1,11 +1,11 @@ --- title: Administration -description: Discover manuals on administration for accounts, organizations, and companies. +description: Overview of administration features and roles in the Docker Admin Console keywords: admin, administration, company, organization, Admin Console, user accounts, account management weight: 10 params: sidebar: - group: Platform + group: Enterprise grid: - title: Company administration description: Explore how to manage a company. @@ -35,17 +35,66 @@ aliases: - /docker-hub/admin-overview --- -Administrators can manage companies and organizations using the Docker Admin Console, or manage organizations in Docker Hub. +Administrators can manage companies and organizations using the +[Docker Admin Console](https://app.docker.com/admin). The Admin Console +provides centralized observability, access management, and security controls +across Docker environments. -The Docker Admin Console is available in [Early Access](../release-lifecycle.md#early-access-ea) to all company owners and organization owners. The [Docker Admin Console](https://admin.docker.com) provides administrators with centralized observability, access management, and controls for their company and organizations. To provide these features, Docker uses the following hierarchy and roles. +## Company and organization hierarchy -![Docker hierarchy](./images/docker-admin-structure.webp) +The [Docker Admin Console](https://app.docker.com/admin) provides administrators with centralized observability, access management, and controls for their company and organizations. To provide these features, Docker uses the following hierarchy and roles. -- Company: A company simplifies the management of Docker organizations and settings. Creating a company is optional and only available to Docker Business subscribers. - - Company owner: A company can have multiple owners. Company owners have company-wide observability and can manage company-wide settings that apply to all associated organizations. In addition, company owners have the same access as organization owners for all associated organizations. -- Organization: An organization is a collection of teams and repositories. Docker Team and Business subscribers must have at least one organization. - - Organization owner: An organization can have multiple owners. Organization owners have observability into their organization and can manage its users and settings. -- Team: A team is a group of Docker members that belong to an organization. Organization and company owners can group members into additional teams to configure repository permissions on a per-team basis. Using teams to group members is optional. -- Member: A member is a Docker user that's a member of an organization. Organization and company owners can assign roles to members to define their permissions. 
+![Diagram showing Docker’s administration hierarchy with Company at the top, followed by Organizations, Teams, and Members](./images/docker-admin-structure.webp) + +### Company + +A company groups multiple Docker organizations for centralized configuration. +Companies are only available for Docker Business subscribers. + +Companies have the following administrator role available: + +- Company owner: Can view and manage all organizations within the company. +Has full access to company-wide settings and inherits the same permissions as +organization owners. + +### Organization + +An organization contains teams and repositories. All Docker Team and Business +subscribers must have at least one organization. + +Organizations have the following administrator role available: + +- Organization owner: Can manage organization settings, users, and access +controls. + +### Team + +Teams are optional and let you group members to assign repository permissions +collectively. Teams simplify permission management across projects +or functions. + +### Member + +A member is any Docker user added to an organization. Organization and company +owners can assign roles to members to define their level of access. + +> [!NOTE] +> +> Creating a company is optional, but organizations are required for Team and +Business subscriptions. + +## Admin Console features + +Docker's [Admin Console](https://app.docker.com/admin) allows you to: + +- Create and manage companies and organizations +- Assign roles and permissions to members +- Group members into teams to manage access by project or role +- Set company-wide policies, including SCIM provisioning and security +enforcement + +## Manage companies and organizations + +Learn how to manage companies and organizations in the following sections. {{< grid >}} diff --git a/content/manuals/admin/company/_index.md b/content/manuals/admin/company/_index.md index ef9332f1cf83..fb6f8ea723f8 100644 --- a/content/manuals/admin/company/_index.md +++ b/content/manuals/admin/company/_index.md @@ -1,8 +1,8 @@ --- -title: Company administration +title: Company administration overview weight: 20 description: Learn how to manage multiple organizations using companies, including managing users, owners, and security. -keywords: company, multiple organizations, manage companies +keywords: company, multiple organizations, manage companies, admin console, Docker Business settings grid: - title: Create a company description: Get started by learning how to create a company. @@ -13,15 +13,15 @@ grid: company. icon: store link: /admin/company/organizations/ -- title: Manage users - description: Explore how to manage users in all organizations. - icon: group_add - link: /admin/company/users/ - title: Manage company owners description: Find out more about company owners and how to manage them. icon: supervised_user_circle link: /admin/company/owners/ -- title: Configure Single Sign-On +- title: Manage users + description: Explore how to manage users in all organizations. + icon: group_add + link: /admin/company/users/ +- title: Configure single sign-on description: Discover how to configure SSO for your entire company. icon: key link: /security/for-admins/single-sign-on/ @@ -31,11 +31,11 @@ grid: icon: checklist link: /security/for-admins/provisioning/scim/ - title: Domain management - description: Add and verify your domains. + description: Add and verify your company's domains. 
icon: domain_verification - link: /admin/company/settings/domains/ + link: /security/for-admins/domain-management/ - title: FAQs - description: Explore common company FAQs. + description: Explore frequently asked questions about companies. link: /faq/admin/company-faqs/ icon: help aliases: @@ -44,8 +44,30 @@ aliases: {{< summary-bar feature_name="Company" >}} -{{< include "admin-company-overview.md" >}} +A company provides a single point of visibility across multiple organizations, +simplifying organization and settings management. + +Organization owners with a Docker Business subscription can create a company +and manage it through the [Docker Admin Console](https://app.docker.com/admin). + +The following diagram shows how a company relates to its associated +organizations. + +![Diagram showing how companies relate to Docker organizations](/admin/images/docker-admin-structure.webp) + +## Key features + +With a company, administrators can: + +- View and manage all nested organizations +- Configure company and organization settings centrally +- Control access to the company +- Have up to ten unique users assigned to the company owner role +- Configure SSO and SCIM for all nested organizations +- Enforce SSO for all users in the company + +## Create and manage your company -Learn how to administer a company in the following sections. +Learn how to create and manage a company in the following sections. {{< grid >}} diff --git a/content/manuals/admin/company/new-company.md b/content/manuals/admin/company/new-company.md index d7525c4622ad..376a1989c448 100644 --- a/content/manuals/admin/company/new-company.md +++ b/content/manuals/admin/company/new-company.md @@ -1,37 +1,42 @@ --- title: Create a company description: Learn how to create a company to centrally manage multiple organizations. -keywords: company, hub, organization, company owner, Admin Console, company management +keywords: company, hub, organization, company owner, Admin Console, company management, Docker Business, create company, Docker Admin Console aliases: - /docker-hub/new-company/ --- {{< summary-bar feature_name="Company" >}} -You can create a new company in the Docker Admin Console. Before you begin, you must: +Learn how to create a new company in the Docker Admin Console, a centralized +dashboard for managing organizations. + +## Prerequisites + +Before you begin, you must: + - Be the owner of the organization you want to add to your company - Have a Docker Business subscription -{{< include "admin-early-access.md" >}} - ## Create a company To create a new company: -1. Sign in to the [Admin Console](https://app.docker.com/admin). -2. Select your organization you want to add to your company from the **Choose profile** page. -3. Under **Organization settings**, select **Company management**. -4. Select **Create a company**. -5. Enter a unique name for your company, then select **Continue**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Admin Console**, then **Company management**. +1. Select **Create a company**. +1. Enter a unique name for your company, then select **Continue**. > [!TIP] > - > The name for your company can't be the same as an existing user, organization, or company namespace. - -6. Review the company migration details and then select **Create company**. + > The name for your company can't be the same as an existing user, + organization, or company namespace. 
-For more information on how you can add organizations to your company, see [Add organizations to a company](./organizations.md#add-organizations-to-a-company). +1. Review the migration details and then select **Create company**. +For more information on how you can add organizations to your company, +see [Add organizations to a company](./organizations.md#add-organizations-to-a-company). ## Next steps diff --git a/content/manuals/admin/company/organizations.md b/content/manuals/admin/company/organizations.md index 37964a816160..19d061259332 100644 --- a/content/manuals/admin/company/organizations.md +++ b/content/manuals/admin/company/organizations.md @@ -1,51 +1,59 @@ --- -description: Learn how to manage organizations in a company. -keywords: company, multiple organizations, manage organizations title: Manage company organizations +description: Learn how to manage organizations in a company. +keywords: company, multiple organizations, manage organizations, Docker Admin Console, organization settings, add organization, company management --- {{< summary-bar feature_name="Company" >}} -You can manage the organizations in a company in the Docker Admin Console. - -{{< include "admin-early-access.md" >}} +Learn to manage the organizations in a company using the Docker Admin Console. ## View all organizations -1. Sign in to the [Admin Console](https://admin.docker.com). -2. Select your company on the **Choose profile** page. -3. Under **Organizations**, select **Overview**. +1. Sign in to the [Docker Home](https://app.docker.com) and choose +your company. +1. Select **Admin Console**, then **Organizations**. -The organization overview page displays all organizations under your company. +The **Organizations** view displays all organizations under your company. ## Add seats to an organization -When you have a [self-serve](../../subscription/details.md#self-serve) subscription that has no pending subscription changes, you can add seats using the following steps. If you have a sales-assisted subscription, you can contact Docker support or sales to add seats. +If you have a [self-serve](../../subscription/details.md#self-serve) +subscription that has no pending subscription changes, you can add seats using +Docker Home. For more information about adding seats, +see [Manage seats](/manuals/subscription/manage-seats.md#add-seats). -For more information about adding seats, see [Manage seats](/manuals/subscription/manage-seats.md#add-seats). +If you have a sales-assisted subscription, you must contact Docker support or +sales to add seats. ## Add organizations to a company -You must be a company owner to add an organization to a company. You must also be an organization owner of the organization you want to add. There is no limit to the number of organizations you can have under a company layer. All organizations must have a Business subscription. +To add an organization to a company, ensure the following: + +- You are a company owner. +- You are an organization owner of the organization you want to add. +- The organization has a Docker Business subscription. +- There’s no limit to how many organizations can exist under a company. > [!IMPORTANT] > -> Once you add an organization to a company, you can't remove it from the company. +> Once you add an organization to a company, you can't remove it from the +company. -1. Sign in to the [Admin Console](https://admin.docker.com). -2. Select your company on the **Choose profile** page. -3. Select **Organizations**, then **Overview**. -4. 
Select **Add organization**.
-5. Choose the organization you want to add from the drop-down menu.
-6. Select **Add organization** to confirm.
+1. Sign in to [Docker Home](https://app.docker.com) and select your company.
+1. Select **Admin Console**, then **Organizations**.
+1. Select **Add organization**.
+1. Choose the organization you want to add from the drop-down menu.
+1. Select **Add organization** to confirm.
 
 ## Manage an organization
 
-1. Sign in to the [Admin Console](https://admin.docker.com).
-2. Select your company on the **Choose profile** page.
-3. Select the organization that you want to manage.
+1. Sign in to [Docker Home](https://app.docker.com) and select your company.
+1. Select **Admin Console**, then **Organizations**.
+1. Select the organization you want to manage.
 
-For more details about managing an organization, see [Organization administration](../organization/_index.md).
+For more details about managing an organization, see
+[Organization administration](../organization/_index.md).
 
 ## More resources
diff --git a/content/manuals/admin/company/owners.md b/content/manuals/admin/company/owners.md
index e5c21edba9f7..c3afa9a65108 100644
--- a/content/manuals/admin/company/owners.md
+++ b/content/manuals/admin/company/owners.md
@@ -1,35 +1,36 @@
 ---
-description: Learn how to add and remove company owners.
-keywords: company, owners
 title: Manage company owners
+description: Learn how to add and remove company owners.
+keywords: company, owners, add company owner, remove company owner, company management, company owner permissions
 aliases:
 - /docker-hub/company-owner/
 ---
 
 {{< summary-bar feature_name="Company" >}}
 
-A company can have multiple owners. Company owners have company-wide
-observability and can manage company-wide settings that apply to all associated
-organizations. In addition, company owners have the same access as organization
-owners for all associated organizations. Unlike organization owners, company
-owners don't need to be member of an organization. When company owners aren't a
-member in an organization, they don't occupy a seat.
+A company can have multiple owners. Company owners have visibility across the
+entire company and can manage settings that apply to all organizations under
+that company. They also have the same access rights as organization owners but
+don’t need to be members of any individual organization.
 
-{{< include "admin-early-access.md" >}}
+> [!IMPORTANT]
+>
+> Company owners do not occupy a seat unless one of the following is true:
+>
+> - They are added as a member of an organization under your company
+> - SSO is enabled
 
 ## Add a company owner
 
-1. Sign in to the [Admin Console](https://admin.docker.com).
-2. Select your company on the **Choose profile** page.
-3. Select **Company owners**.
-4. Select **Add owner**.
-5. Specify the user's Docker ID to search for the user.
-6. After you find the user, select **Add company owner**.
+1. Sign in to [Docker Home](https://app.docker.com) and select your company.
+1. Select **Admin Console**, then **Company owners**.
+1. Select **Add owner**.
+1. Specify the user's Docker ID to search for the user.
+1. After you find the user, select **Add company owner**.
 
 ## Remove a company owner
 
-1. Sign in to the [Admin Console](https://admin.docker.com).
-2. Select your company on the **Choose profile** page.
-3. Select **Company owners**.
-4. Select the **Action** icon in the row of the company owner that your want to remove.
-5. Select **Remove as company owner**.
+1. Sign in to [Docker Home](https://app.docker.com) and select your company.
+1. Select **Admin Console**, then **Company owners**.
+1. Locate the company owner you want to remove and select the **Actions** menu.
+1. Select **Remove as company owner**.
diff --git a/content/manuals/admin/company/users.md b/content/manuals/admin/company/users.md
index f76ec83e29e4..d747a8ed235f 100644
--- a/content/manuals/admin/company/users.md
+++ b/content/manuals/admin/company/users.md
@@ -1,17 +1,145 @@
 ---
+title: Manage company members
 description: Learn how to manage company users in the Docker Admin Console.
-keywords: company, company users, users, admin, Admin Console
-title: Manage company users
+keywords: company, company users, users, admin, Admin Console, member management, organization management, company management, bulk invite, resend invites
 ---
 
 {{< summary-bar feature_name="Company" >}}
 
-You can manage users at the company-level in the Docker Admin Console.
+Company owners can invite new members to an organization via Docker ID,
+email address, or in bulk with a CSV file containing email
+addresses.
 
-{{% admin-users product="admin" layer="company" %}}
+If an invitee does not have a Docker account, they must create an account and
+verify their email address before they can accept an invitation to join the
+organization. Pending invitations occupy seats for the organization
+the user is invited to.
 
-{{< include "admin-early-access.md" >}}
+## Invite members via Docker ID or email address
+
+Use the following steps to invite members to your organization via Docker ID or
+email address.
+
+1. Sign in to [Docker Home](https://app.docker.com) and select
+your company.
+1. On the **Organizations** page, select the organization you want
+to invite members to.
+1. Select **Members**, then **Invite**.
+1. Select **Emails or usernames**.
+1. Follow the on-screen instructions to invite members.
+   Invite a maximum of 1000 members and separate multiple entries by comma,
+   semicolon, or space.
+
+   > [!NOTE]
+   >
+   > When you invite members, you assign them a role.
+   > See [Roles and permissions](/security/for-admins/roles-and-permissions/)
+   > for details about the access permissions for each role.
+
+   Pending invitations appear on the Members page. The invitees receive an
+   email with a link to Docker Hub where they can accept or decline the
+   invitation.
+
+## Invite members via CSV file
+
+To invite multiple members to an organization via a CSV file containing email
+addresses:
+
+1. Sign in to [Docker Home](https://app.docker.com) and select
+your company.
+1. On the **Organizations** page, select the organization you want
+to invite members to.
+1. Select **Members**, then **Invite**.
+1. Select **CSV upload**.
+1. Select **Download the template CSV file** to optionally download an example
+CSV file. The following is an example of the contents of a valid CSV file.
+
+   ```text
+   email
+   docker.user-0@example.com
+   docker.user-1@example.com
+   ```
+
+   CSV file requirements:
+
+   - The file must contain a header row with at least one heading named `email`.
+   Additional columns are allowed and are ignored in the import.
+   - The file must contain a maximum of 1000 email addresses (rows). To invite
+   more than 1000 users, create multiple CSV files and perform all steps in
+   this task for each file.
+
+1. Create a new CSV file or export a CSV file from another application.
+
+   - To export a CSV file from another application, see the application’s
+   documentation.
+   - To create a new CSV file, open a new file in a text editor, type `email`
+     on the first line, type the user email addresses one per line on the
+     following lines, and then save the file with a .csv extension.
+
+1. Select **Browse files** and then select your CSV file, or drag and drop the
+CSV file into the **Select a CSV file to upload** box. You can only select
+one CSV file at a time.
+
+   > [!NOTE]
+   >
+   > If the number of email addresses in your CSV file exceeds the number of
+   > available seats in your organization, you cannot continue to invite members.
+   > To invite members, you can purchase more seats, or remove some email
+   > addresses from the CSV file and re-select the new file. To purchase more
+   > seats, see [Add seats to your subscription](/subscription/add-seats/) or
+   > [Contact sales](https://www.docker.com/pricing/contact-sales/).
+
+1. After the CSV file has been uploaded, select **Review**.
+
+   Valid email addresses and any email addresses that have issues appear.
+   Email addresses might have the following issues:
+
+   - Invalid email: The email address is not a valid address. The email address
+     is ignored if you send invites. You can correct the email address in
+     the CSV file and re-import the file.
+   - Already invited: The user has already been sent an invite email and another
+     invite email isn't sent.
+   - Member: The user is already a member of your organization and an invite
+     email isn't sent.
+   - Duplicate: The CSV file has multiple occurrences of the same email address.
+     The user receives only one invite email.
+
+1. Follow the on-screen instructions to invite members.
+
+   > [!NOTE]
+   >
+   > When you invite members, you assign them a role.
+   > See [Roles and permissions](/security/for-admins/roles-and-permissions/)
+   > for details about the access permissions for each role.
+
+Pending invitations appear on the Members page. The invitees receive an email
+with a link to Docker Hub where they can accept or decline the invitation.
+
+## Resend invitations to users
+
+You can resend individual or bulk invitations from the Admin Console.
+
+### Resend individual invitations
+
+1. In [Docker Home](https://app.docker.com/), select your company.
+1. Select **Admin Console**, then **Users**.
+1. Select the **action menu** next to the invitee and select **Resend**.
+1. Select **Invite** to confirm.
+
+### Bulk resend invitations
+
+1. In [Docker Home](https://app.docker.com/), select your company.
+1. Select **Admin Console**, then **Users**.
+1. Use the **checkboxes** next to **Usernames** to bulk select users.
+1. Select **Resend invites**.
+1. Select **Resend** to confirm.
+
+## Invite members via API
+
+You can bulk invite members using the Docker Hub API. For more information,
+see the [Bulk create invites](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1bulk/post) API endpoint.
+An example request appears at the end of this page.
 
 ## Manage members on a team
 
-Use Docker Hub to add a member to a team or remove a member from a team. For more details, see [Manage members in Docker Hub](../organization/members.md#manage-members-on-a-team).
+Use Docker Hub to add a member to a team or remove a member from a team. For
+more details, see [Manage members](../organization/members.md#manage-members-on-a-team).
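+
+## Example: bulk invite API request
+
+The following sketch shows what a call to the bulk invites endpoint might
+look like. The endpoint path and method come from the API reference linked
+above; the request body fields are illustrative assumptions, so check the
+reference for the exact schema and replace the angle-bracket placeholders
+with your own values.
+
+```console
+$ curl -X POST "https://hub.docker.com/v2/invites/bulk" \
+  -H "Authorization: Bearer <access-token>" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "org": "<organization-name>",
+        "team": "<team-name>",
+        "invitees": ["docker.user-0@example.com", "docker.user-1@example.com"],
+        "role": "member"
+      }'
+```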
diff --git a/content/manuals/admin/deactivate-account.md b/content/manuals/admin/deactivate-account.md deleted file mode 100644 index e23437a7b20b..000000000000 --- a/content/manuals/admin/deactivate-account.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Deactivate an organization -description: Learn how to deactivate a Docker organization. -keywords: Docker Hub, delete, deactivate organization, account, organization management -aliases: -- /docker-hub/deactivate-account/ ---- - -{{< summary-bar feature_name="General admin" >}} - -You can deactivate an account at any time. This section describes the prerequisites and steps to deactivate an organization account. For information on deactivating a user account, see [Deactivate a user account](../accounts/deactivate-user-account.md). - -> [!WARNING] -> -> All Docker products and services that use your Docker account or organization account will be inaccessible after deactivating your account. - -## Prerequisites - -Before deactivating an organization, complete the following: - -- Download any images and tags you want to keep: - `docker pull -a :`. - -- If you have an active Docker subscription, [downgrade it to a free subscription](../subscription/change.md). - -- Remove all other members within the organization. - -- Unlink your [Github and Bitbucket accounts](../docker-hub/repos/manage/builds/link-source.md#unlink-a-github-user-account). - -- For Business organizations, [remove your SSO connection](../security/for-admins/single-sign-on/manage/#remove-an-organization). - -## Deactivate - -Once you have completed all the previous steps, you can deactivate your organization. - -> [!WARNING] -> -> This cannot be undone. Be sure you've gathered all the data you need from your organization before deactivating it. - -{{< tabs >}} -{{< tab name="Admin Console" >}} - -1. In Admin Console, choose the organization you want to deactivate. -2. Under **Organization settings**, select **Deactivate**. -3. Enter the organization name to confirm deactivation. -4. Select **Deactivate organization**. - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -1. On Docker Hub, select **Organizations**. -2. Choose the organization you want to deactivate. -3. In **Settings**, select the **Deactivate Org** tab and then **Deactivate organization**. - -{{< /tab >}} -{{< /tabs >}} diff --git a/content/manuals/admin/faqs/_index.md b/content/manuals/admin/faqs/_index.md index 5bae20c90b97..af8a1116a484 100644 --- a/content/manuals/admin/faqs/_index.md +++ b/content/manuals/admin/faqs/_index.md @@ -1,6 +1,7 @@ --- build: render: never +linkTitle: FAQ title: Account and admin FAQ weight: 30 --- diff --git a/content/manuals/admin/faqs/company-faqs.md b/content/manuals/admin/faqs/company-faqs.md index 9e8efcbc7f2e..890214e0c9c9 100644 --- a/content/manuals/admin/faqs/company-faqs.md +++ b/content/manuals/admin/faqs/company-faqs.md @@ -10,70 +10,40 @@ aliases: - /faq/admin/company-faqs/ --- -### Are existing subscriptions affected when you create a company and add organizations to it? - -You can manage subscriptions and related billing details at the organization level. - ### Some of my organizations don’t have a Docker Business subscription. Can I still use a parent company? -Yes, but you can only add organizations with a Docker Business subscription to a company. +Yes, but you can only add organizations with a Docker Business subscription +to a company. ### What happens if one of my organizations downgrades from Docker Business, but I still need access as a company owner? 
-To access and manage child organizations, the organization must have a Docker Business subscription. If the organization isn’t included in this subscription, the owner of the organization must manage the organization outside of the company. - -### Does my organization need to prepare for downtime during the migration process? - -No, you can continue with business as usual. - -### How many company owners can I add? - -You can add a maximum of 10 company owners to a single company account. +To access and manage child organizations, the organization must have a +Docker Business subscription. If the organization isn’t included in this +subscription, the owner of the organization must manage the organization +outside of the company. ### Do company owners occupy a subscription seat? -Company owners don't occupy a seat in any organization unless they are added as a -member of the organization. Since company owners have the same access as -organization owners for all organizations associated with the company, it is not -necessary to add company owners to an organization. - -Note that when you first create a company, your account will be both a company -owner and an organization owner. Your account will occupy a seat as long as -you're an organization owner. - -### What permissions does the company owner have in the associated/nested organizations? - -Company owners can navigate to the **Organizations** page to view all their nested organizations in a single location. They can also view or edit organization members and change single sign-on (SSO) and System for Cross-domain Identity Management (SCIM) settings. Changes to company settings impact all users in each organization under the company. For more information, see [Roles and permissions](../../security/for-admins/roles-and-permissions.md). +Company owners do not occupy a seat unless one of the following is true: -### What features are supported at the company level? +- They are added as a member of an organization under your company +- SSO is enabled -You can manage domain verification, SSO, and SCIM at the company level. The following features aren't supported at the company level, but you can manage them at the organization level: +Although company owners have the same access as organization owners across all +organizations in the company, it's not necessary to add them to any +organization. Doing so will cause them to occupy a seat. -- Image Access Management -- Registry Access Management -- User management -- Billing +When you first create a company, your account is both a company owner and an +organization owner. In that case, your account will occupy a seat as long as +you remain an organization owner. -To view and manage users across all the organizations under your company, you can [manage users at the company level](../../admin/company/users.md) when you use the [Admin Console](https://admin.docker.com). +To avoid occupying a seat, [assign another user as the organization owner](/manuals/admin/organization/members.md#update-a-member-role) and remove yourself from the organization. +You'll retain full administrative access as a company owner without using a +subscription seat. -Domain audit isn't supported for companies or organizations within a company. - -### What's required to create a company name? - -A company name must be unique to that of its child organization. If a child organization requires the same name as a company, you should modify it slightly. 
For example, **Docker Inc** (parent company), **Docker** (child organization).
-
-### How does a company owner add an organization to the company?
-
-You can add organizations to a company in the Admin Console. For more information, see [Add organizations to a company](../../admin/company/organizations.md#add-organizations-to-a-company.md).
-
-### How does a company owner manage SSO/SCIM settings for a company?
-
-See your [SCIM](scim.md) and [SSO](../../security/for-admins/single-sign-on/configure/_index.md) settings.
-
-### How does a company owner enable group mapping in an IdP?
-
-See [SCIM](scim.md) and [group mapping](../../security/for-admins/provisioning/group-mapping.md) for more information.
+### What permissions does the company owner have in the associated or nested organizations?
 
-### What's the definition of a company versus an organization?
+Company owners can navigate to the **Organizations** page to view all their
+nested organizations in a single location. They can also view or edit organization members and change single sign-on (SSO) and System for Cross-domain Identity Management (SCIM) settings. Changes to company settings impact all users in each organization under the company.
 
-A company is a collection of organizations that are managed together. An organization is a collection of repositories and teams that are managed together.
+For more information, see [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md).
diff --git a/content/manuals/admin/faqs/general-faqs.md b/content/manuals/admin/faqs/general-faqs.md
index eb34f31a5fcb..a062450f6020 100644
--- a/content/manuals/admin/faqs/general-faqs.md
+++ b/content/manuals/admin/faqs/general-faqs.md
@@ -1,6 +1,6 @@
 ---
-title: General FAQs for Docker accounts
-linkTitle: General
+title: FAQs on Docker accounts
+linkTitle: Accounts
 weight: 10
 description: Frequently asked Docker account and administration questions
 keywords: onboarding, docker, teams, orgs, user accounts, organization accounts
@@ -13,76 +13,63 @@ aliases:
 
 ### What is a Docker ID?
 
-A Docker ID is a username for your Docker account that lets you access Docker products. To create a Docker ID, you need an email address or you can sign up with your social or GitHub accounts. Your Docker ID must be between 4 and 30 characters long, and can only contain numbers and lowercase letters. You can't use any special characters or spaces.
+A Docker ID is a username for your Docker account that lets you access Docker
+products. To create a Docker ID, you need one of the following:
 
-For more information, see [Docker ID](/accounts/create-account/). If your administrator enforces [single sign-on (SSO)](../../security/for-admins/single-sign-on/_index.md), this provisions a Docker ID for new users.
+- An email address
+- A social account
+- A GitHub account
 
-Developers may have multiple Docker IDs in order to separate their Docker IDs associated with an organization with a Docker Business or Team subscription, and their personal use Docker IDs.
+Your Docker ID must be between 4 and 30 characters long, and can only contain
+numbers and lowercase letters. You can't use any special characters or spaces.
 
-### What if my Docker ID is taken?
-
-All Docker IDs are first-come, first-served except for companies that have a US Trademark on a username. If you have a trademark for your namespace, [Docker Support](https://hub.docker.com/support/contact/) can retrieve the Docker ID for you.
+For more information, see [Create a Docker ID](/accounts/create-account/).
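+
+As an informal illustration of these rules, the following shell check (a
+sketch, not an official Docker tool) matches only IDs made up of 4 to 30
+lowercase letters and numbers:
+
+```console
+$ echo "moby2024" | grep -E '^[a-z0-9]{4,30}$'
+moby2024
+```
+
+An ID containing uppercase letters, spaces, or special characters produces no
+output, because the pattern encodes the same length and character rules
+described above.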
-### What’s an organization? +### Can I change my Docker ID? -An organization in Docker is a collection of teams and repositories that are managed together. Docker users become members of an organization once they're associated with that organization by an organization owner. An [organization owner](#who-is-an-organization-owner) is a user with administrative access to the organization. For more information on creating organizations, see [Create your organization](orgs.md). - -### What's an organization name or namespace? +No. You can't change your Docker ID once it's created. If you need a different +Docker ID, you must create a new Docker account with a new Docker ID. -The organization name, sometimes referred to as the organization namespace or the organization ID, is the unique identifier of a Docker organization. The organization name can't be the same as an existing Docker ID. +Docker IDs can't be reused after deactivation. -### What are roles? - -A role is a collection of permissions granted to members. Roles define access to perform actions in Docker Hub like creating repositories, managing tags, or viewing teams. See [Roles and permissions](roles-and-permissions.md). +### What if my Docker ID is taken? -### What’s a team? +All Docker IDs are first-come, first-served except for companies that have a +U.S. Trademark on a username. -A team is a group of Docker users that belong to an organization. An organization can have multiple teams. An organization owner can then create new teams and add members to an existing team using Docker IDs or email address and by selecting a team the user should be part of. See [Create and manage a team](manage-a-team.md). +If you have a trademark for your namespace, +[Docker Support](https://hub.docker.com/support/contact/) can retrieve the +Docker ID for you. -### What's a company? +### What's an organization name or namespace? -A company is a management layer that centralizes administration of multiple organizations. Administrators can add organizations with a Docker Business subscription to a company and configure settings for all organizations under the company. See [Set up your company](/admin/company/). +The organization name, sometimes referred to as the organization namespace or +the organization ID, is the unique identifier of a Docker organization. The +organization name can't be the same as an existing Docker ID. ### Who is an organization owner? -An organization owner is an administrator who has permissions to manage -repositories, add members, and manage member roles. They have full access to -private repositories, all teams, billing information, and organization settings. -An organization owner can also specify [repository permissions](manage-a-team.md#configure-repository-permissions-for-a-team) for each team in the -organization. Only an organization owner can enable SSO for the organization. -When SSO is enabled for your organization, the organization owner can also -manage users. +An organization owner is a member who has administrator permissions. They +have full access to private repositories, all teams, billing information, and +organization settings. -Docker can auto-provision Docker IDs for new end-users or users who'd like to -have a separate Docker ID for company use through SSO enforcement. - -The organization owner can also add additional owners to help them manage users, teams, and repositories in the organization. 
+For more information on the organization owner role, see [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md). ### Can I configure multiple SSO identity providers (IdPs) to authenticate users to a single org? -Docker SSO allows only one IdP configuration per organization. For more -information, see [Configure SSO](../../security/for-admins/single-sign-on/configure/_index.md) and [SSO FAQs](../../security/faqs/single-sign-on/faqs.md). +Yes. Docker SSO supports multiple IdP configurations. For more +information, see [Configure SSO](/manuals/enterprise/security/single-sign-on/configure.md) and [SSO FAQs](../../security/faqs/single-sign-on/faqs.md). ### What is a service account? -> [!IMPORTANT] -> -> As of December 10, 2024, service accounts are no longer available. Existing Service Account agreements will be honored until their current term expires, but new purchases or renewals of service accounts no longer available and customers must renew under a new subscription plan. It is recommended to transition to Organization Access Tokens (OATs), which can provide similar functionality. For more information, see [Organization access tokens (Beta)](/manuals/security/for-admins/access-tokens.md). +Service accounts were deprecated on December 10, 2024. Existing Service Account +agreements will be honored until their current term expires, but new purchases +or renewals of service accounts are no longer available and customers must renew +under a new subscription. -A [service account](../../docker-hub/service-accounts.md) is a Docker ID used for automated management of container images or containerized applications. Service accounts are typically used in automated workflows, and don't share Docker IDs with the members in the Team or Business plan. Common use cases for service accounts include mirroring content on Docker Hub, or tying in image pulls from your CI/CD process. +For similar functionality, +transition to [Organization access tokens](/manuals/enterprise/security/access-tokens.md). ### Can I delete or deactivate a Docker account for another user? -Only someone with access to the Docker account can deactivate the account. For more details, see [Deactivating an account](../../admin/deactivate-account.md). - -If the user is a member of your organization, you can remove the user from your organization. For more details, see [Remove a member or invitee](../../admin/organization/members.md#remove-a-member-from-a-team). - -### How do I manage settings for a user account? - -You can manage your account settings anytime when you sign in to your [Docker account](https://app.docker.com/login). In Docker Home, select your avatar in the top-right navigation, then select **My Account**. You can also access this menu from any Docker web applications when you're signed in to your account. See [Manage your Docker account](/accounts/manage-account). If your account is associated with an organization that uses SSO, you may have limited access to the settings that you can control. - -### How do I add an avatar to my Docker account? - -To add an avatar to your Docker account, create a [Gravatar account](https://gravatar.com/) and create your avatar. Next, add your Gravatar email to your Docker account settings. - -Note, that it may take some time for your avatar to update in Docker. +Only someone with access to the Docker account can deactivate the account. For more details, see [Deactivating an account](../../admin/organization/deactivate-account.md). 
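+
+### How do I sign in with an organization access token?
+
+As a sketch only: an organization access token is typically supplied to
+`docker login` in place of a personal password, with a username as described
+on the [Organization access tokens](/manuals/enterprise/security/access-tokens.md)
+page. The placeholders below are illustrative:
+
+```console
+$ docker login --username <organization-name>
+Password: <organization-access-token>
+```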
diff --git a/content/manuals/admin/faqs/organization-faqs.md b/content/manuals/admin/faqs/organization-faqs.md index 0f6261fcb6bf..f58b4af37dea 100644 --- a/content/manuals/admin/faqs/organization-faqs.md +++ b/content/manuals/admin/faqs/organization-faqs.md @@ -10,80 +10,62 @@ aliases: - /faq/admin/organization-faqs/ --- -### What if the Docker ID I want for my organization or company is taken? +### How can I see how many active users are in my organization? -All Docker IDs are first-come, first-served except for companies that have a U.S. Trademark on a username. If you have a trademark for your namespace, [Docker Support](https://hub.docker.com/support/contact/) can retrieve the Docker ID for you. +If your organization uses a Software Asset Management tool, you can use it to +find out how many users have Docker Desktop installed. If your organization +doesn't use this software, you can run an internal survey +to find out who is using Docker Desktop. -### How do I add an organization owner? +For more information, see [Identify your Docker users and their Docker accounts](../../admin/organization/onboard.md#step-1-identify-your-docker-users-and-their-docker-accounts). -An existing owner can add additional team members as organization owners. You can [invite a member](../../admin/organization/members.md#invite-members) and assign them the owner role in Docker Hub or the Docker Admin Console. +### Do users need to authenticate with Docker before an owner can add them to an organization? -### How do I know how many active users are part of my organization? - -If your organization uses a Software Asset Management tool, you can use it to find out how many users have Docker Desktop installed. If your organization doesn't use this software, you can run an internal survey to find out who is using Docker Desktop. See [Identify your Docker users and their Docker accounts](../../admin/organization/onboard.md#step-1-identify-your-docker-users-and-their-docker-accounts). With a Docker Business subscription, you can manage members in your identity provider and automatically provision them to your Docker organization with [SSO](../../security/for-admins/single-sign-on/_index.md) or [SCIM](../../security/for-admins/provisioning/scim.md). - -### Do users first need to authenticate with Docker before an owner can add them to an organization? - -No. Organization owners can invite users with their email addresses, and also assign them to a team during the invite process. +No. Organization owners can invite users with their email addresses, and also +assign them to a team during the invite process. ### Can I force my organization's members to authenticate before using Docker Desktop and are there any benefits? -Yes. You can [enforce sign-in](../../security/for-admins/enforce-sign-in/_index.md). Some benefits of enforcing sign-in are: +Yes. You can +[enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). -- Administrators can enforce features like [Image Access Management](/manuals/security/for-admins/hardened-desktop/image-access-management.md) and [Registry Access Management](../../security/for-admins/hardened-desktop/registry-access-management.md). - - Administrators can ensure compliance by blocking Docker Desktop usage for users who don't sign in as members of the organization. 
+Some benefits of enforcing sign-in are: -### If a user has their personal email associated with a user account in Docker Hub, do they have to convert to using the organization's domain before they can be invited to join an organization? +- Administrators can enforce features like [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md) and [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md). + - Administrators can ensure compliance by blocking Docker Desktop usage for + users who don't sign in as members of the organization. -Yes. When SSO is enabled for your organization, each user must sign in with the company’s domain. However, the user can retain their personal credentials and create a new Docker ID associated with their organization's domain. - -### Can I convert my personal user account (Docker ID) to an organization account? +### Can I convert my personal Docker ID to an organization account? Yes. You can convert your user account to an organization account. Once you convert a user account into an organization, it's not possible to -revert it to a personal user account. For prerequisites and instructions, see -[Convert an account into an organization](convert-account.md). +revert it to a personal user account. -### Our users create Docker Hub accounts through self-service. How do we know when the total number of users for the requested licenses has been met? Is it possible to add more members to the organization than the total number of licenses? - -There isn't any automatic notification when the total number of users for the requested licenses has been met. However, if the number of team members exceed the number of licenses, you will receive an error informing you to contact the administrator due to lack of seats. You can [add seats](../../subscription/manage-seats.md) if needed. - -### How can I merge organization accounts? - -You can downgrade a secondary organization and transition your users and data to a primary organization. See [Merge organizations](../organization/orgs.md#merge-organizations). +For prerequisites and instructions, see +[Convert an account into an organization](convert-account.md). ### Do organization invitees take up seats? Yes. A user invited to an organization will take up one of the provisioned -seats, even if that user hasn’t accepted their invitation yet. Organization -owners can manage the list of invitees through the **Invitees** tab on the organization settings page in Docker Hub, or in the **Members** page in Admin Console. +seats, even if that user hasn’t accepted their invitation yet. + +To manage invites, see [Manage organization members](/manuals/admin/organization/members.md). ### Do organization owners take a seat? -Yes. Organization owners will take up a seat. +Yes. Organization owners occupy a seat. ### What is the difference between user, invitee, seat, and member? -User refers to a Docker user with a Docker ID. - -An invitee is a user that an administrator has invited to join an organization but has not yet accepted their invitation. - -Seats are the number of planned members within an organization. - -Member may refer to a user who has received and accepted an invitation to join an organization. Member can also refer to a member of a team within an organization. - -### If there are two organizations and a user belongs to both organizations, do they take up two seats? - -Yes. 
In a scenario where a user belongs to two organizations, they take up one seat in each organization. - -### Is it possible to set permissions for repositories within an organization? - -Yes. You can configure repository access on a per-team basis. For example, you -can specify that all teams within an organization have **Read and Write** access -to repositories A and B, whereas only specific teams have **Admin** access. Org -owners have full administrative access to all repositories within the -organization. See [Configure repository permissions for a team](manage-a-team.md#configure-repository-permissions-for-a-team). Administrators can also assign members the editor role, which grants administrative permissions for repositories across the namespace of the organization. See [Roles and permissions](../../security/for-admins/roles-and-permissions.md). +- User: Docker user with a Docker ID. +- Invitee: A user that an administrator has invited to join an organization but +has not yet accepted their invitation. +- Seats: The number of purchased seats in an organization. +- Member: A user who has received and accepted an invitation to join an +organization. Member can also refer to a member of a team within an +organization. -### Does my organization need to use Docker's registry? +### If I have two organizations and a user belongs to both organizations, do they take up two seats? -A registry is a hosted service containing repositories of images that responds to the Registry API. Docker Hub is Docker's primary registry, but you can use Docker with other container image registries. You can access the default registry by browsing to [Docker Hub](https://hub.docker.com) or using the `docker search` command. +Yes. In a scenario where a user belongs to two organizations, they take up one +seat in each organization. diff --git a/content/manuals/admin/organization/_index.md b/content/manuals/admin/organization/_index.md index 53cec8882596..ec1d2bc1b07f 100644 --- a/content/manuals/admin/organization/_index.md +++ b/content/manuals/admin/organization/_index.md @@ -2,8 +2,8 @@ title: Organization administration overview linkTitle: Organization administration weight: 10 -description: Learn about managing organizations in Docker including how they relate to teams, how to onboard, and more -keywords: organizations, admin, overview +description: Learn how to manage your Docker organization, including teams, members, permissions, and settings. +keywords: organizations, admin, overview, manage teams, roles grid: - title: Onboard your organization description: Learn how to onboard and secure your organization. @@ -37,7 +37,7 @@ grid: icon: key - title: Domain management description: Add, verify, and audit your domains. - link: /admin/organization/security-settings/domains/ + link: /security/for-admins/domain-management/ icon: domain_verification - title: FAQs description: Explore common organization FAQs. @@ -45,10 +45,26 @@ grid: icon: help --- -{{< include "admin-org-overview.md" >}} +A Docker organization is a collection of teams and repositories with centralized +management. It helps administrators group members and assign access in a +streamlined, scalable way. -To create an organization, see [Create your organization](../organization/orgs.md). +## Organization structure -Learn how to administer an organization in the following sections. +The following diagram shows how organizations relate to teams and members. 
-{{< grid >}}
+![Diagram showing how teams and members relate within a Docker organization](/admin/images/org-structure.webp)
+
+## Organization members
+
+Organization owners have full administrator access to manage members, roles,
+and teams across the organization.
+
+An organization includes members and optional teams. Teams help group members
+and simplify permission management.
+
+## Create and manage your organization
+
+Learn how to create and manage your organization in the following sections.
+
+{{< grid >}}
\ No newline at end of file
diff --git a/content/manuals/admin/organization/activity-logs.md b/content/manuals/admin/organization/activity-logs.md
index 7dc4659b7a0d..57ea00250a16 100644
--- a/content/manuals/admin/organization/activity-logs.md
+++ b/content/manuals/admin/organization/activity-logs.md
@@ -1,43 +1,76 @@
 ---
 title: Activity logs
 weight: 50
-description: Learn about activity logs.
-keywords: team, organization, activity, log, audit, activities
+description: Learn how to access and interpret Docker activity logs for organizations and repositories.
+keywords: audit log, organization activity, Docker business logs, repository activity, track changes Docker, security logs Docker, filter logs, log Docker events
 aliases:
 - /docker-hub/audit-log/
 ---
 
 {{< summary-bar feature_name="Activity logs" >}}
 
-Activity logs display a chronological list of activities that occur at organization and repository levels. It provides a report to owners on all their member activities.
+Activity logs display a chronological list of activities that occur at organization and repository levels. The activity log provides organization owners with a record of all
+member activities.
 
 With activity logs, owners can view and track:
+
 - What changes were made
 - The date when a change was made
 - Who initiated the change
 
 For example, activity logs display activities such as the date when a repository was created or deleted, the member who created the repository, the name of the repository, and when there was a change to the privacy settings.
 
-Owners can also see the activity logs for their repository if the repository is part of the organization subscribed to a Docker Business or Team plan.
+Owners can also see the activity logs for their repository if the repository is part of an organization with a Docker Business or Team subscription.
 
-## Manage activity logs
+## Access activity logs
 
 {{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
+
+To view activity logs in the Admin Console:
 
-{{% admin-org-audit-log product="hub" %}}
+1. Sign in to [Docker Home](https://app.docker.com) and select your
+organization.
+1. Select **Admin Console**, then **Activity logs**.
 
 {{< /tab >}}
-{{< tab name="Admin Console" >}}
+{{< tab name="Docker Hub" >}}
 
-{{< include "admin-early-access.md" >}}
+{{% include "hub-org-management.md" %}}
 
-{{% admin-org-audit-log product="admin" %}}
+To view activity logs in Docker Hub:
+
+1. Sign in to [Docker Hub](https://hub.docker.com).
+1. Select **My Hub**, your organization, and then **Activity**.
 
 {{< /tab >}}
 {{< /tabs >}}
 
-## Event definitions
+## Filter and customize activity logs
+
+By default, the **Activity** tab displays all recorded events. To narrow your
+view, use the calendar to select a specific date range. The log updates to
+show only the activities that occurred during that period.
+
+You can also filter by activity type. Use the **All Activities** drop-down to
+focus on organization-level, repository-level, or billing-related events.
+In Docker Hub, when viewing a repository, the **Activities** tab only shows
+events for that repository.
+
+After selecting a category (**Organization**, **Repository**, or **Billing**),
+use the **All Actions** drop-down to refine the results even further by
+specific event type.
+
+> [!NOTE]
+>
+> Events triggered by Docker Support appear under the username **dockersupport**.
+
+> [!IMPORTANT]
+>
+> Docker retains activity logs for three months. To maintain access to older
+> data, export logs regularly.
+
+## Types of activity log events
 
 Refer to the following section for a list of events and their descriptions:
 
@@ -71,6 +104,14 @@ Refer to the following section for a list of events and their descriptions:
 | Policy updated | Details of updating a settings policy |
 | Policy deleted | Details of deleting a settings policy |
 | Policy transferred | Details of transferring a settings policy to another owner |
+| Create SSO Connection | Details of creating a new org/company SSO connection |
+| Update SSO Connection | Details of updating an existing org/company SSO connection |
+| Delete SSO Connection | Details of deleting an existing org/company SSO connection |
+| Enforce SSO | Details of toggling enforcement on an existing org/company SSO connection |
+| Enforce SCIM | Details of toggling SCIM on an existing org/company SSO connection |
+| Refresh SCIM Token | Details of a SCIM token refresh on an existing org/company SSO connection |
+| Change SSO Connection Type | Details of a connection type change on an existing org/company SSO connection |
+| Toggle JIT provisioning | Details of a JIT toggle on an existing org/company SSO connection |
 
 ### Repository events
 
diff --git a/content/manuals/admin/organization/convert-account.md b/content/manuals/admin/organization/convert-account.md
index da6eaada9b3f..2bd9d30ea3f2 100644
--- a/content/manuals/admin/organization/convert-account.md
+++ b/content/manuals/admin/organization/convert-account.md
@@ -9,13 +9,15 @@ aliases:
 
 {{< summary-bar feature_name="Admin orgs" >}}
 
-You can convert an existing user account to an organization. This is useful if you need multiple users to access your account and the repositories that it’s connected to. Converting it to an organization gives you better control over permissions for these users through [teams](manage-a-team.md) and [roles](roles-and-permissions.md).
+Learn how to convert an existing user account into an organization. This is
+useful if you need multiple users to access your account and the repositories
+it’s connected to. Converting it to an organization gives you better control
+over permissions for these users through
+[teams](/manuals/admin/organization/manage-a-team.md) and
+[roles](/manuals/enterprise/security/roles-and-permissions.md).
 
-When you convert a user account to an organization, the account is migrated to a Docker Team plan.
-
-> [!IMPORTANT]
->
-> Once you convert your account to an organization, you can’t revert it to a user account.
+When you convert a user account to an organization, the account is migrated to
+a Docker Team subscription by default.
 
 ## Prerequisites
 
@@ -24,9 +26,9 @@ Before you convert a user account to an organization, ensure that you meet the f
 
 - The user account that you want to convert must not be a member of a company or any teams or organizations. You must remove the account from all teams, organizations, or the company. To do this:
 
- - 1.
Navigate to **Organizations** and then select the organization(s) you need to leave. - 2. Find your username in the **Members** tab. - 3. Select the **More options** menu and then select **Leave organization**. + 1. Navigate to **My Hub** and then select the organization you need to leave. + 1. Find your username in the **Members** tab. + 1. Select the **More options** menu and then select **Leave organization**. If the user account is the sole owner of any organization or company, assign another user the owner role and then remove yourself from the organization or company. @@ -34,36 +36,43 @@ Before you convert a user account to an organization, ensure that you meet the f If you want to convert your user account into an organization account and you don't have any other user accounts, you need to create a new user account to assign it as the owner of the new organization. With the owner role assigned, this user account has full administrative access to configure and manage the organization. You can assign more users the owner role after the conversion. -## Effects of converting an account into an organization - -Consider the following effects of converting your account: - -- This process removes the email address for the account, and organization owners will receive notification emails instead. You'll be able to reuse the removed email address for another account after converting. - -- The current plan will cancel and your new subscription will start. - -- Repository namespaces and names won't change, but converting your account removes any repository collaborators. Once you convert the account, you'll need to add those users as team members. - -- Existing automated builds will appear as if they were set up by the first owner added to the organization. See [Convert an account into an organization](#convert-an-account-into-an-organization) for steps on adding the first owner. - -- The user account that you add as the first owner will have full administrative access to configure and manage the organization. - -- Converting a user account to an organization will delete all of the user's personal access tokens. See [Create an access token](/manuals/security/for-developers/access-tokens.md#create-an-access-token) for steps on creating personal access tokens after converting the user account. +## What happens when you convert your account + +The following happens when you convert your account into +an organization: + +- This process removes the email address for the account. Notifications are +instead sent to organization owners. You'll be able to reuse the +removed email address for another account after converting. +- The current subscription will automatically cancel and your new subscription +will start. +- Repository namespaces and names won't change, but converting your account +removes any repository collaborators. Once you convert the account, you'll need +to add repository collaborators as team members. +- Existing automated builds appear as if they were set up by the first owner +added to the organization. +- The user account that you add as the first owner will have full +administrative access to configure and manage the organization. +- To transfer a user's personal access tokens (PATs) to your converted +organization, you must designate the user as an organization owner. This will +ensure any PATs associated with the user's account are transferred to the +organization owner. ## Convert an account into an organization -1. 
Ensure you have removed your user account from any company or teams or organizations. Also make sure that you have a new Docker ID before you convert an account. See the [Prerequisites](#prerequisites) section for details.
-
-2. Sign in to your [Docker account](https://app.docker.com/login).
-
-3. In Docker Home, select your avatar in the top-right corner to open the drop-down.
-
-4. Select **Account settings**.
-
-5. In the **Account management** section, select **Convert account**.
-
-6. Review the warning displayed about converting a user account. This action cannot be undone and has considerable implications for your assets and the account.
-
-7. Enter a **Docker ID** to set an organization owner. This is the user account that will manage the organization, and the only way to access the organization settings after conversion. You cannot use the same Docker ID as the account you are trying to convert.
-
-8. Select **Confirm and purchase** to confirm. The new owner receives a notification email. Use that owner account to sign in and manage the new organization.
+> [!IMPORTANT]
+>
+> Converting an account into an organization is permanent. Back up any data
+> or settings you want to retain.
+
+1. Sign in to [Docker Home](https://app.docker.com/).
+1. Select your avatar in the top-right corner to open the drop-down.
+1. From **Account settings**, select **Convert**.
+1. Review the warning displayed about converting a user account. This action
+cannot be undone and has considerable implications for your assets and the
+account.
+1. Enter a **Username of new owner** to set an organization owner. The new
+Docker ID you specify becomes the organization’s owner. You cannot use the
+same Docker ID as the account you are trying to convert.
+1. Select **Confirm**. The new owner receives a notification email. Use that
+owner account to sign in and manage the new organization.
diff --git a/content/manuals/admin/organization/deactivate-account.md b/content/manuals/admin/organization/deactivate-account.md
new file mode 100644
index 000000000000..d799923359cd
--- /dev/null
+++ b/content/manuals/admin/organization/deactivate-account.md
@@ -0,0 +1,64 @@
+---
+title: Deactivate an organization
+description: Learn how to deactivate a Docker organization and required prerequisite steps.
+keywords: delete, deactivate organization, account, organization management, Admin Console, cancel subscription
+weight: 42
+aliases:
+- /docker-hub/deactivate-account/
+---
+
+{{< summary-bar feature_name="General admin" >}}
+
+Learn how to deactivate a Docker organization, including required prerequisite
+steps. For information about deactivating user
+accounts, see [Deactivate a user account](../../accounts/deactivate-user-account.md).
+
+> [!WARNING]
+>
+> All Docker products and services that use your Docker account or organization
+> account will be inaccessible after deactivating your account.
+
+## Prerequisites
+
+You must complete all the following steps before you can deactivate your
+organization:
+
+- Download any images and tags you want to keep (see the example after this
+list):
+  `docker pull -a :`.
+- If you have an active Docker subscription, [downgrade it to a free subscription](../../subscription/change.md).
+- Remove all other members within the organization.
+- Unlink your [GitHub and Bitbucket accounts](../../docker-hub/repos/manage/builds/link-source.md#unlink-a-github-user-account).
+- For Business organizations, [remove your SSO connection](/manuals/enterprise/security/single-sign-on/manage.md#remove-an-organization).
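+
+For example, to pull every tag of a repository before deactivating (a sketch;
+replace the placeholders with your own namespace and repository):
+
+```console
+$ docker pull -a <namespace>/<repository>
+```
+
+The `-a` (`--all-tags`) flag downloads all tagged images in the repository,
+so you keep a local copy of each tag.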
+
+## Deactivate
+
+You can deactivate your organization using either the Admin Console or
+Docker Hub.
+
+> [!WARNING]
+>
+> This cannot be undone. Be sure you've gathered all the data you need from
+> your organization before deactivating it.
+
+{{< tabs >}}
+{{< tab name="Admin Console" >}}
+
+1. Sign in to [Docker Home](https://app.docker.com) and select the organization
+you want to deactivate.
+1. Select **Admin Console**, then **Deactivate**. If the **Deactivate**
+button is unavailable, confirm you've completed all [Prerequisites](#prerequisites).
+1. Enter the organization name to confirm deactivation.
+1. Select **Deactivate organization**.
+
+{{< /tab >}}
+{{< tab name="Docker Hub" >}}
+
+{{% include "hub-org-management.md" %}}
+
+1. Sign in to [Docker Hub](https://hub.docker.com).
+1. Choose the organization you want to deactivate.
+1. In **Settings**, select **Deactivate org**.
+1. Select **Deactivate organization**.
+
+{{< /tab >}}
+{{< /tabs >}}
diff --git a/content/manuals/admin/organization/general-settings.md b/content/manuals/admin/organization/general-settings.md
index d6ce00340418..e5c88a96c0e4 100644
--- a/content/manuals/admin/organization/general-settings.md
+++ b/content/manuals/admin/organization/general-settings.md
@@ -1,32 +1,35 @@
 ---
-title: Organization settings
+title: Organization information
 weight: 60
 description: Learn how to manage settings for organizations using Docker Admin Console.
-keywords: organization, settings, Admin Console
+keywords: organization, settings, Admin Console, manage, Docker organization, Gravatar, SCIM, SSO setup, domain management, organization settings
 ---
 
-{{< include "admin-early-access.md" >}}
+Learn how to update your organization information using the Admin Console.
 
-This section describes how to manage organization settings in the Docker Admin Console.
-
-## Configure general information
+## Update organization information
 
 General organization information appears on your organization landing page in the Admin Console.
 
 This information includes:
+
 - Organization Name
 - Company
 - Location
 - Website
- - Gravatar email: To add an avatar to your Docker account, create a [Gravatar account](https://gravatar.com/) and create your avatar. Next, add your Gravatar email to your Docker account settings. It may take some time for your avatar to update in Docker.
+ - Gravatar email: To add an avatar to your Docker account, create a [Gravatar account](https://gravatar.com/) and upload an avatar. Next, add your Gravatar email to your Docker account settings. It might take some time for your avatar to update in Docker.
 
 To edit this information:
 
-1. Sign in to the [Admin Console](https://admin.docker.com).
-2. Select your company on the **Choose profile** page.
-3. Under **Organization settings**, select **General**.
-4. Specify the organization information and select **Save**.
+1. Sign in to the [Admin Console](https://app.docker.com/admin) and
+select your organization.
+1. Enter or update your organization’s details, then select **Save**.
 
 ## Next steps
 
-In the **Organization settings** menu, you can also [configure SSO](../../security/for-admins/single-sign-on/configure/) and [set up SCIM](../../security/for-admins/provisioning/scim.md). If your organization isn't part of a company, from here you can also [audit your domains](../../security/for-admins/domain-audit.md) or [create a company](new-company.md).
+After configuring your organization information, you can:
+
+- [Configure single sign-on (SSO)](/manuals/enterprise/security/single-sign-on/configure.md)
+- [Set up SCIM provisioning](/manuals/enterprise/security/provisioning/scim.md)
+- [Manage domains](/manuals/enterprise/security/domain-management.md)
+- [Create a company](new-company.md)
diff --git a/content/manuals/admin/organization/insights.md b/content/manuals/admin/organization/insights.md
index c313e49e1735..086ddf0528ab 100644
--- a/content/manuals/admin/organization/insights.md
+++ b/content/manuals/admin/organization/insights.md
@@ -1,7 +1,7 @@
 ---
-description: Gain insights about your organization's users and their Docker usage.
-keywords: organization, insights
 title: Insights
+description: Gain insights about your organization's users and their Docker usage.
+keywords: organization, insights, Docker Desktop analytics, user usage statistics, Docker Business, track Docker activity
 ---
 
 {{< summary-bar feature_name="Insights" >}}
@@ -13,34 +13,33 @@ productivity and efficiency across the organization.
 
 Key benefits include:
 
-- Uniform working environment. Establish and maintain standardized
+- Uniform working environment: Establish and maintain standardized
   configurations across teams.
-- Best practices. Promote and enforce usage guidelines to ensure optimal
+- Best practices: Promote and enforce usage guidelines to ensure optimal
   performance.
-- Increased visibility. Monitor and drive adoption of organizational
+- Increased visibility: Monitor and drive adoption of organizational
   configurations and policies.
-- Optimized license use. Ensure that developers have access to advanced
+- Optimized license use: Ensure that developers have access to advanced
   features provided by a Docker subscription.
 
 ## Prerequisites
 
+To use Insights, you must meet the following requirements:
+
 - [Docker Business subscription](../../subscription/details.md#docker-business)
-- Administrators must [enforce sign-in](/security/for-admins/enforce-sign-in/) for users
-- Insights enabled by your Customer Success Manager
+- Administrators must [enforce sign-in](/security/for-admins/enforce-sign-in/)
+for users
+- Your Account Executive must turn on Insights for your organization
 
 ## View Insights for organization users
 
-{{< include "admin-early-access.md" >}}
+To access Insights, contact your Account Executive to have the
+feature turned on. Once the feature is turned on, access Insights using the
+following steps:
 
-To access Insights, you must contact your Customer Success Manager to have the
-feature enabled. Once the feature is enabled, access Insights using the following
-steps:
-
-1. Go to the [Admin Console](https://app.docker.com/admin/) and sign in to an
-   account that is an organization owner.
-2. Select your company on the **Choose profile** page.
-3. Select **Insights**.
-4. On the **Insights** page, select the period of time for the data.
+1. Sign in to [Docker Home](https://app.docker.com/) and choose
+your organization.
+1. Select **Insights**, then select the period of time for the data.
 
 > [!NOTE]
 >
@@ -48,7 +47,7 @@ steps:
 > Insights page, view the **Last updated** date to understand when the data was
 > last updated.
 
-You can view data in the following charts:
+Insights data is displayed in the following charts:
 
 - [Docker Desktop users](#docker-desktop-users)
 - [Builds](#builds)
@@ -65,13 +64,13 @@ organization, providing insights into how many users are actively using Docker
 Desktop.
Note that users who opt out of analytics aren't included in the active counts.
 
-The chart contains the following data.
+The chart contains the following data:
 
| Data | Description |
|:-----------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Active user | The number of users that have actively used Docker Desktop and either signed in with a Docker account that has a license in your organization or signed in to a Docker account with an email address from a domain associated with your organization.<br><br>Users who don’t sign in to an account associated with your organization are not represented in the data. To ensure users sign in with an account associated with your organization, you can [enforce sign-in](/security/for-admins/enforce-sign-in/). |
-| Total organization members | The number of users that have used Docker Desktop, regardless of their Insights activity. |
-| Users opted out of analytics | The number of users that are a member of your organization that have opted out of sending analytics.<br><br>When users opt out of sending analytics, you won't see any of their data in Insights. To ensure that the data includes all users, you can use [Settings Management](/desktop/hardened-desktop/settings-management/) to set `analyticsEnabled` for all your users. |
+| Active user | The number of users who have actively used Docker Desktop and either signed in with a Docker account that has a license in your organization or signed in to a Docker account with an email address from a domain associated with your organization.<br><br>Users who don’t sign in to an account associated with your organization are not represented in the data. To ensure users sign in with an account associated with your organization, you can [enforce sign-in](/security/for-admins/enforce-sign-in/). |
+| Total organization members | The number of users who have used Docker Desktop, regardless of their Insights activity. |
+| Users opted out of analytics | The number of users who are members of your organization that have opted out of sending analytics.<br><br>When users opt out of sending analytics, you won't see any of their data in Insights. To ensure that the data includes all users, you can use [Settings Management](/desktop/hardened-desktop/settings-management/) to set `analyticsEnabled` for all your users. |
| Active users (graph) | The view over time for total active users. |
 
@@ -81,7 +80,7 @@ Monitor development efficiency and the time your team invests in builds with
this chart. It provides a clear view of the build activity, helping you identify patterns, optimize build times, and enhance overall development productivity.
 
-The chart contains the following data.
+The chart contains the following data:
 
| Data | Description |
|:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 
@@ -96,7 +95,7 @@ View the total and average number of containers run by users with this chart.
It lets you gauge container usage across your organization, helping you understand usage trends and manage resources effectively.
 
-The chart contains the following data.
+The chart contains the following data:
 
| Data | Description |
|:---------------------------------------|:---------------------------------------------------------------------------------------------|
 
@@ -111,7 +110,7 @@ workflows and ensure compatibility. It provides valuable insights into how
Docker Desktop is being utilized, enabling you to streamline processes and improve efficiency.
 
-The chart contains the following data.
+The chart contains the following data:
 
| Data | Description |
|:----------------------------------|:-------------------------------------------------------------------------------------------------|
 
@@ -128,10 +127,11 @@ usage, ensuring that the most critical resources are readily available and
efficiently used.
 
 > [!NOTE]
+>
 > Data for images is only for Docker Hub. Data for third-party
 > registries and mirrors aren't included.
 
-The chart contains the following data.
+The chart contains the following data:
 
| Data | Description |
|:---------------------|:------------------------------------------------------------------|
 
@@ -142,44 +142,78 @@ The chart contains the following data.
 
 ### Extensions
 
 Monitor extension installation activity with this chart. It provides visibility
-into the Docker Desktop extensions your team are using, letting you track
+into the Docker Desktop extensions your teams are using, letting you track
 adoption and identify popular tools that enhance productivity.
 
-The chart contains the following data.
+The chart contains the following data:
| Data | Description |
|:-----------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------|
| Percentage of org with extensions installed | The percentage of users in your organization with at least one Docker Desktop extension installed. |
| Top 5 extensions installed in the organization | A list of the top 5 Docker Desktop extensions installed by users in your organization and the number of users who have installed each extension. |
+## Export Docker Desktop user data
+
+You can export Docker Desktop user data as a CSV file:
+
+1. Open [Docker Home](https://app.docker.com) and select your organization
+on the **Choose profile** page.
+1. Select **Admin Console** in the left-hand navigation menu.
+1. Select **Desktop insights**.
+1. Choose a timeframe for your insights data: **1 Week**, **1 Month**, or
+**3 Months**.
+1. Select **Export** and choose **Docker Desktop users** from the drop-down.
+
+Your export downloads automatically. Open the file to view
+the export data.
+
+### Understanding export data
+
+A Docker Desktop user export file contains the following data points:
+
+- Name: User's name
+- Username: User's Docker ID
+- Email: User's email address associated with their Docker ID
+- Type: User type
+- Role: User [role](/manuals/enterprise/security/roles-and-permissions.md)
+- Teams: Team(s) within your organization the user is a
+member of
+- Date Joined: The date the user joined your organization
+- Last Logged-In Date: The last date the user logged into Docker using
+their web browser (this includes Docker Hub and Docker Home)
+- Docker Desktop Version: The version of Docker Desktop the user has
+installed
+- Last Seen Date: The last date the user used the Docker Desktop application
+- Opted Out Analytics: Whether the user has opted out of the
+[Send usage statistics](/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md#send-usage-statistics) setting in Docker Desktop
## Troubleshoot Insights
If you’re experiencing issues with data in Insights, consider the following
-solutions to resolve common problems.
+solutions to resolve common problems:
-* Update users to the latest version of Docker Desktop.
+- Update users to the latest version of Docker Desktop.
Data is not shown for users using versions 4.16 or lower of Docker Desktop. In addition, older versions may not provide all data. Ensure all users have installed the latest version of Docker Desktop.
-* Enable **Send usage statistics** in Docker Desktop for all your users.
+- Turn on **Send usage statistics** in Docker Desktop for all your users.
If users have opted out of sending usage statistics for Docker Desktop, then their usage data will not be a part of Insights. To manage the setting at scale for all your users, you can use [Settings
- Management](/desktop/hardened-desktop/settings-management/) and enable the
+ Management](/desktop/hardened-desktop/settings-management/) and turn on the
  `analyticsEnabled` setting.
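+
+  For example, a minimal Settings Management `admin-settings.json` that turns
+  analytics collection on and locks the setting for every user might look like
+  the following sketch. It assumes version 2 of the configuration file format;
+  check the Settings Management settings reference for the full schema before
+  rolling it out:
+
+  ```json
+  {
+    "configurationFileVersion": 2,
+    "analyticsEnabled": {
+      "value": true,
+      "locked": true
+    }
+  }
+  ```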
-* Ensure that users are using Docker Desktop and aren't using the standalone
+- Ensure users use Docker Desktop and aren't using the standalone
version of Docker Engine.
- Only Docker Desktop can provide data for Insights. If a user installs and
- uses Docker Engine outside of Docker Desktop, Docker Engine won't provide
+ Only Docker Desktop can provide data for Insights. If a user installs Docker
+ Engine outside of Docker Desktop, Docker Engine won't provide
data for that user.
-* Ensure that users are signing in to an account associated with your
+- Make sure users sign in to an account associated with your
organization.
Users who don’t sign in to an account associated with your organization are
diff --git a/content/manuals/admin/organization/manage-a-team.md b/content/manuals/admin/organization/manage-a-team.md
index e0a06d291715..5a52abe7916d 100644
--- a/content/manuals/admin/organization/manage-a-team.md
+++ b/content/manuals/admin/organization/manage-a-team.md
@@ -2,89 +2,112 @@
title: Create and manage a team
weight: 40
description: Learn how to create and manage teams for your organization
-keywords: Docker, docker, registry, teams, organizations, plans, Dockerfile, Docker
- Hub, docs, documentation, repository permissions
+keywords: docker, registry, teams, organizations, plans, Dockerfile, Docker
+ Hub, docs, documentation, repository permissions, configure repository access, team management
aliases:
- /docker-hub/manage-a-team/
---
{{< summary-bar feature_name="Admin orgs" >}}
-You can create teams for your organization in Docker Hub and the Docker Admin Console. You can [configure repository access for a team](#configure-repository-permissions-for-a-team) in Docker Hub.
+You can create teams for your organization in the Admin Console or Docker Hub,
+and configure team repository access in Docker Hub.
-A team is a group of Docker users that belong to an organization. An organization can have multiple teams. An organization owner can then create new teams and add members to an existing team using their Docker ID or email address and by selecting a team the user should be part of. Members aren't required to be part of a team to be associated with an organization.
+A team is a group of Docker users that belong to an organization. An
+organization can have multiple teams. An organization owner can create new
+teams and add members to an existing team using their Docker ID or email
+address. Members aren't required to be part of a team to be associated with an
+organization.
-The organization owner can add additional organization owners to help them manage users, teams, and repositories in the organization by assigning them the owner role.
+The organization owner can add additional organization owners to help them
+manage users, teams, and repositories in the organization by assigning them
+the owner role.
-## Organization owner
+## What is an organization owner?
An organization owner is an administrator who has the following permissions:
-- Manage repositories and add team members to the organization.
-- Access private repositories, all teams, billing information, and organization settings.
-- Specify [permissions](#permissions-reference) for each team in the organization.
-- Enable [SSO](../../security/for-admins/single-sign-on/_index.md) for the organization.
+- Manage repositories and add team members to the organization
+- Access private repositories, all teams, billing information, and
+organization settings
+- Specify [permissions](#permissions-reference) for each team in the
+organization
+- Enable [SSO](/manuals/enterprise/security/single-sign-on/_index.md) for the
+organization
When SSO is enabled for your organization, the organization owner can also manage users. Docker can auto-provision Docker IDs for new end-users or users who'd like to have a separate Docker ID for company use through SSO enforcement.
-The organization owner can also add additional organization owners to help them manage users, teams, and repositories in the organization.
+Organization owners can add others with the owner role to help them
+manage users, teams, and repositories in the organization.
+
+For more information on roles, see
+[Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md).
## Create a team
{{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
-1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select **Organizations** and choose your organization.
-3. Select the **Teams** tab and then select **Create Team**.
-4. Fill out your team's information and select **Create**.
-5. [Add members to your team](members.md#add-a-member-to-a-team).
+1. Sign in to [Docker Home](https://app.docker.com) and select your
+organization.
+1. Select **Teams**.
+1. Select **Create team**.
+1. Fill out your team's information and select **Create**.
+1. [Add members to your team](members.md#add-a-member-to-a-team).
{{< /tab >}}
-{{< tab name="Admin Console" >}}
-{{< include "admin-early-access.md" >}}
-1. In Admin Console, select your organization.
-2. In the **User management** section, select **Teams**.
-3. Select **Create team**.
-4. Fill out your team's information and select **Create**.
-5. [Add members to your team](members.md#add-a-member-to-a-team).
+{{< tab name="Docker Hub" >}}
+
+{{% include "hub-org-management.md" %}}
+
+1. Sign in to [Docker Hub](https://hub.docker.com).
+1. Select **My Hub** and choose your organization.
+1. Select the **Teams** tab and then select **Create Team**.
+1. Fill out your team's information and select **Create**.
+1. [Add members to your team](members.md#add-a-member-to-a-team).
{{< /tab >}}
{{< /tabs >}}
-## Configure repository permissions for a team
+## Set team repository permissions
Organization owners can configure repository permissions on a per-team basis.
-For example, you can specify that all teams within an organization have "Read and
-Write" access to repositories A and B, whereas only specific teams have "Admin"
-access. Note that organization owners have full administrative access to all repositories within the organization.
+For example, you can specify that all teams within an organization have
+"Read and Write" access to repositories A and B, whereas only specific
+teams have "Admin" access.
+
+Note that organization owners have full administrative access to all
+repositories within the organization.
To give a team access to a repository:
1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select **Organizations** and choose your organization.
-3. Select the **Teams** tab and select the team that you'd like to configure repository access to.
-4. Select the **Permissions** tab and select a repository from the
- **Repository** drop-down.
-5. Choose a permission from the **Permissions** drop-down list and select
- **Add**.
-
-Organization owners can also assign members the editor role to grant partial administrative access. See [Roles and permissions](../../security/for-admins/roles-and-permissions.md) for more about the editor role.
+1. Select **My Hub** and choose your organization.
+1. In the **Teams** section, select the team you want to configure repository
+access for.
+1. Select the **Permissions** tab and select a repository from the
+**Repository** drop-down.
+1. Choose a permission from the **Permissions** drop-down list and select
+**Add**.
+
+Organization owners can also assign members the editor role to grant partial
+administrative access. For more information on the editor role, see
+[Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md).
### Permissions reference
-- `Read-only` access lets users view, search, and pull a private repository in the same way as they can a public repository.
-- `Read & Write` access lets users pull, push, and view a repository. In addition, it lets users view, cancel, retry or trigger builds
+- `Read-only` access lets users view, search, and pull a private repository
+in the same way as they can a public repository.
+- `Read & Write` access lets users pull, push, and view a repository. In
+addition, it lets users view, cancel, retry, or trigger builds.
- `Admin` access lets users pull, push, view, edit, and delete a
- repository. You can also edit build settings, and update the repositories description, collaborators rights, public/private visibility, and delete.
+ repository. You can also edit build settings and update the repository’s
+ description, collaborator permissions, public/private visibility, and delete the repository.
Permissions are cumulative. For example, if you have "Read & Write" permissions,
-you automatically have "Read-only" permissions:
+you automatically have "Read-only" permissions.
+
+The following table shows what each permission level allows users to do:
| Action | Read-only | Read & Write | Admin |
|:------------------:|:---------:|:------------:|:-----:|
@@ -102,44 +125,48 @@ you automatically have "Read-only" permissions:
> [!NOTE]
>
-> A user who hasn't verified their email address only has
-> `Read-only` access to the repository, regardless of the rights their team
-> membership has given them.
+> A user who hasn't verified their email address only has `Read-only` access to
+> the repository, regardless of the rights their team membership has given them.
-## View a team's permissions for all repositories
+## View team permissions for all repositories
To view a team's permissions across all repositories:
1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select **Organizations** and choose your organization.
-3. Select **Teams** and choose your team name.
-4. Select the **Permissions** tab, where you can view the repositories this team can access.
+1. Select **My Hub** and choose your organization.
+1. Select **Teams** and choose your team name.
+1. Select the **Permissions** tab, where you can view the repositories this
+team can access.
## Delete a team
-Organization owners can delete a team in Docker Hub or Admin Console. When you remove a team from your organization, this action revokes the members' access to the team's permitted resources. It won't remove users from other teams that they belong to, nor will it delete any resources.
+Organization owners can delete a team. When you remove a team from your
+organization, this action revokes member access to the team's permitted
+resources. It won't remove users from other teams that they belong to, and it
+won't delete any resources.
{{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
-1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select **Organizations** and choose your organization.
-3. Select the **Teams** tab.
-4. Select the name of the team that you want to delete.
-5. Select **Settings**.
-6. Select **Delete Team**.
-7. Review the confirmation message, then select **Delete**.
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Teams**.
+1. Select the **Actions** icon next to the name of the team you want to delete.
+1. Select **Delete team**.
+1. Review the confirmation message, then select **Delete**.
{{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} -1. In the [Admin Console](https://app.docker.com/admin), select your organization. -2. In the **User management** section, select **Teams**. -3. Select the **Actions** icon next to the name of the team you want to delete. -4. Select **Delete team**. -5. Review the confirmation message, then select **Delete**. +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub** and choose your organization. +1. Select **Teams**. +1. Select the name of the team that you want to delete. +1. Select **Settings**. +1. Select **Delete Team**. +1. Review the confirmation message, then select **Delete**. {{< /tab >}} {{< /tabs >}} diff --git a/content/manuals/admin/organization/manage-products.md b/content/manuals/admin/organization/manage-products.md index b20004ed23f3..40125f86b2af 100644 --- a/content/manuals/admin/organization/manage-products.md +++ b/content/manuals/admin/organization/manage-products.md @@ -1,8 +1,8 @@ --- title: Manage Docker products weight: 45 -description: Learn how to manage Docker products for your organization -keywords: organization, tools, products +description: Learn how to manage access and usage for Docker products for your organization +keywords: organization, tools, products, product access, organization management --- {{< summary-bar feature_name="Admin orgs" >}} @@ -11,99 +11,105 @@ In this section, learn how to manage access and view usage of the Docker products for your organization. For more detailed information about each product, including how to set up and configure them, see the following manuals: -- [Docker Build Cloud](../../build-cloud/_index.md) - [Docker Desktop](../../desktop/_index.md) - [Docker Hub](../../docker-hub/_index.md) +- [Docker Build Cloud](../../build-cloud/_index.md) - [Docker Scout](../../scout/_index.md) - [Testcontainers Cloud](https://testcontainers.com/cloud/docs/#getting-started) -## Manage access to Docker products +## Manage product access for your organization -Access to Docker products included in your subscription is enabled by default -for all users. The included products are: +Access to the Docker products included in your subscription is turned on by +default for all users. For an overview of products included in your +subscription, see +[Docker subscriptions and features](/manuals/subscription/details.md). -- Docker Hub -- Docker Build Cloud -- Docker Desktop -- Docker Scout +{{< tabs >}} +{{< tab name="Docker Desktop" >}} -Testcontainers Cloud is not enabled by default. To enable Testcontainers Cloud, see the Testcontainers [Getting Started](https://testcontainers.com/cloud/docs/#getting-started) guide. +### Manage Docker Desktop access -The following sections describe how to enable or disable access for these products. +To manage Docker Desktop access: -### Manage access to Docker Build Cloud +1. [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). +1. Manage members [manually](./members.md) or use +[provisioning](/manuals/enterprise/security/provisioning/_index.md). -To learn how to initially set up and configure Docker Build Cloud, sign in to -the [Docker Build Cloud Dashboard](https://app.docker.com/build) and follow the -on-screen instructions. +With sign-in enforced, only users who are a member of your organization can +use Docker Desktop after signing in. 
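+
+For example, with the `registry.json` method, the enforcement file is a small
+JSON document that lists the organizations users must belong to. A minimal
+sketch, using `myorg` as a placeholder organization namespace:
+
+```json
+{
+  "allowedOrgs": ["myorg"]
+}
+```
+
+See the enforce sign-in documentation linked in step 1 for where to place this
+file on each operating system.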
-To manage access to Docker Build Cloud, sign in to [Docker Build -Cloud](http://app.docker.com/build) as an organization owner, select **Account -settings**, and then manage access under **Lock Docker Build Cloud**. +{{< /tab >}} +{{< tab name="Docker Hub" >}} -### Manage access to Docker Scout +### Manage Docker Hub access -To learn how to initially set up and configure Docker Scout for remote -repositories, sign in to the [Docker Scout Dashboard](https://scout.docker.com/) -and follow the on-screen instructions. +To manage Docker Hub access, sign in to +[Docker Home](https://app.docker.com/) and configure [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) +or [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md). -To manage access to Docker Scout for use on remote repositories, sign in to the -[Docker Scout Dashboard](https://scout.docker.com/) and configure -[integrations](../../scout/explore/dashboard.md#integrations) and [repository -settings](../../scout/explore/dashboard.md#repository-settings). +{{< /tab >}} +{{< tab name="Docker Build Cloud" >}} -To manage access to Docker Scout for use on local images with Docker Desktop, use -[Settings -Management](../../security/for-admins/hardened-desktop/settings-management/_index.md) -and set `sbomIndexing` to `false` to disable, or to `true` to enable. +### Manage Docker Build Cloud access -### Manage access to Docker Hub +To initially set up and configure Docker Build Cloud, sign in to +[Docker Build Cloud](https://app.docker.com/build) and follow the +on-screen instructions. -To manage access to Docker Hub, sign in to the [Docker Admin Console](https://app.docker.com/admin) and configure [Registry Access -Management](../../security/for-admins/hardened-desktop/registry-access-management.md) -or [Image Access -Management](../../security/for-admins/hardened-desktop/image-access-management.md). +To manage Docker Build Cloud access: -### Manage access to Testcontainers Cloud +1. Sign in to [Docker Build Cloud](http://app.docker.com/build) as an +organization owner. +1. Select **Account settings**. +1. Select **Lock access to Docker Build Account**. -To learn how to initially set up and configure Testcontainers Cloud, sign in to -[Testcontainers Cloud](https://app.testcontainers.cloud/) and follow the -on-screen instructions. +{{< /tab >}} +{{< tab name="Docker Scout" >}} -To manage access to Testcontainers Cloud, sign in to the [Testcontainers Cloud -Settings page](https://app.testcontainers.cloud/dashboard/settings) as -an organization owner, and then manage access under **Lock Testcontainers -Cloud**. +### Manage Docker Scout access -### Manage access to Docker Desktop +To initially set up and configure Docker Scout, sign in to +[Docker Scout](https://scout.docker.com/) and follow the on-screen instructions. -To manage access to Docker Desktop, you can [enforce -sign-in](../../security/for-admins/enforce-sign-in/_index.md), then and manage -members [manually](./members.md) or use -[provisioning](../../security/for-admins/provisioning/_index.md). With sign-in -enforced, only users who are a member of your organization can use Docker -Desktop after signing in. +To manage Docker Scout access: -## View Docker product usage +1. Sign in to [Docker Scout](https://scout.docker.com/) as an organization +owner. +1. Select your organization, then **Settings**. +1. 
To manage what repositories are enabled for Docker Scout analysis, select
+**Repository settings**. For more information,
+see [repository settings](../../scout/explore/dashboard.md#repository-settings).
+1. To manage access to Docker Scout for use on local images with Docker Desktop,
+use [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md)
+and set `sbomIndexing` to `false` to disable, or to `true` to enable.
+
+{{< /tab >}}
+{{< tab name="Testcontainers Cloud" >}}
+
+### Manage Testcontainers Cloud access
+
+To initially set up and configure Testcontainers Cloud, sign in to
+[Testcontainers Cloud](https://app.testcontainers.cloud/) and follow the
+on-screen instructions.
-View usage for the products on the following pages:
+To manage access to Testcontainers Cloud:
-- Docker Build Cloud: View the **Build minutes** page in the [Docker Build Cloud
- Dashboard](http://app.docker.com/build).
+1. Sign in to [Testcontainers Cloud](https://app.testcontainers.cloud/) and
+select **Account**.
+1. Select **Settings**, then **Lock access to Testcontainers Cloud**.
-- Docker Scout: View the [**Repository settings**
- page](https://scout.docker.com/settings/repos) in the Docker Scout
- Dashboard.
+{{< /tab >}}
+{{< /tabs >}}
-- Docker Hub: View the [**Usage** page](https://hub.docker.com/usage) in Docker
- Hub.
+## Monitor product usage for your organization
-- Testcontainers Cloud: View the [**Billing**
- page](https://app.testcontainers.cloud/dashboard/billing) in the
- Testcontainers Cloud Dashboard.
+To view usage for Docker products:
-- Docker Desktop: View the **Insights** page in the [Docker Admin Console](https://app.docker.com/admin). For more details, see
- [Insights](./insights.md).
+- Docker Desktop: View the **Insights** page in [Docker Home](https://app.docker.com/). For more details, see [Insights](./insights.md).
+- Docker Hub: View the [**Usage** page](https://hub.docker.com/usage) in Docker Hub.
+- Docker Build Cloud: View the **Build minutes** page in [Docker Build Cloud](http://app.docker.com/build).
+- Docker Scout: View the [**Repository settings** page](https://scout.docker.com/settings/repos) in Docker Scout.
+- Testcontainers Cloud: View the [**Billing** page](https://app.testcontainers.cloud/dashboard/billing) in Testcontainers Cloud.
-If your usage exceeds your subscription amount, you can [scale your
-subscription](../../subscription/scale.md) to meet your needs.
\ No newline at end of file
+If your usage or seat count exceeds your subscription amount, you can
+[scale your subscription](../../subscription/scale.md) to meet your needs.
diff --git a/content/manuals/admin/organization/members.md b/content/manuals/admin/organization/members.md
index 888435904b3a..f8947e3c62ee 100644
--- a/content/manuals/admin/organization/members.md
+++ b/content/manuals/admin/organization/members.md
@@ -2,7 +2,7 @@
title: Manage organization members
weight: 30
description: Learn how to manage organization members in Docker Hub and Docker Admin Console.
-keywords: members, teams, organizations, invite members, manage team members
+keywords: members, teams, organizations, invite members, manage team members, export member list, edit roles, organization teams, user management
aliases:
- /docker-hub/members/
---
@@ -12,16 +12,156 @@
Learn how to manage members for your organization in Docker Hub and the Docker A
## Invite members
{{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
+
+Owners can invite new members to an organization via Docker ID, email address, or with a CSV file containing email addresses. If an invitee does not have a Docker account, they must create an account and verify their email address before they can accept an invitation to join the organization. When inviting members, their pending invitation occupies a seat.
+
+### Invite members via Docker ID or email address
+
+Use the following steps to invite members to your organization via Docker ID or email address.
+
+1. Sign in to [Docker Home](https://app.docker.com) and select your organization.
+1. Select **Members**, then **Invite**.
+1. Select **Emails or usernames**.
+1. Follow the on-screen instructions to invite members. Invite a maximum of 1000 members and separate multiple entries by comma, semicolon, or space.
+
+> [!NOTE]
+>
+> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for
+> details about the access permissions for each role.
+
+Pending invitations appear in the table. Invitees receive an email with a link to Docker Hub where they can accept or decline the invitation.
-{{% admin-users product="hub" %}}
+### Invite members via CSV file
+
+To invite multiple members to an organization via a CSV file containing email addresses:
+
+1. Sign in to [Docker Home](https://app.docker.com) and select your organization.
+1. Select **Members**, then **Invite**.
+1. Select **CSV upload**.
+1. Optional. Select **Download the template CSV file** to download an example CSV file. The following is an example of the contents of a valid CSV file.
+
+```text
+email
+docker.user-0@example.com
+docker.user-1@example.com
+```
+
+CSV file requirements:
+
+- The file must contain a header row with at least one heading named `email`. Additional columns are allowed and are ignored in the import.
+- The file must contain a maximum of 1000 email addresses (rows). To invite more than 1000 users, create multiple CSV files and perform all steps in this task for each file.
+
+1. Create a new CSV file or export a CSV file from another application.
+
+- To export a CSV file from another application, see the application’s documentation.
+- To create a new CSV file, open a new file in a text editor, type `email` on the first line, type the user email addresses one per line on the following lines, and then save the file with a `.csv` extension.
+
+1. Select **Browse files** and then select your CSV file, or drag and drop the CSV file into the **Select a CSV file to upload** box. You can only select one CSV file at a time.
+
+> [!NOTE]
+>
+> If the number of email addresses in your CSV file exceeds the number of available seats in your organization, you cannot continue to invite members. To invite members, you can purchase more seats, or remove some email addresses from the CSV file and re-select the new file. To purchase more seats, see [Add seats](/manuals/subscription/manage-seats.md) to your subscription or [Contact sales](https://www.docker.com/pricing/contact-sales/).
+
+1. After the CSV file has been uploaded, select **Review**.
+
+Valid email addresses and any email addresses that have issues appear. Email addresses might have the following issues:
+
+- Invalid email: The email address is not a valid address. The email address will be ignored if you send invites. You can correct the email address in the CSV file and re-import the file.
+- Already invited: The user has already been sent an invite email and another invite email will not be sent.
+- Member: The user is already a member of your organization and an invite email will not be sent.
+- Duplicate: The CSV file has multiple occurrences of the same email address. The user will be sent only one invite email.
+
+1. Follow the on-screen instructions to invite members.
+
+> [!NOTE]
+>
+> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for
+> details about the access permissions for each role.
+
+Pending invitations appear in the table. The invitees receive an email with a link to Docker Hub where they can accept or decline the invitation.
+
+### Invite members via API
+
+You can bulk invite members using the Docker Hub API. For more information, see the [Bulk create invites](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1bulk/post) API endpoint.
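+
+As an illustration, a bulk invite request might look like the following
+sketch. The JSON body fields shown here are assumptions for illustration;
+check the Bulk create invites reference linked above for the exact request
+schema, and replace `$TOKEN`, `myorg`, and the email addresses with your own
+values:
+
+```console
+$ # Send a POST request to the bulk invites endpoint with a bearer token.
+$ curl -X POST "https://hub.docker.com/v2/invites/bulk" \
+    -H "Authorization: Bearer $TOKEN" \
+    -H "Content-Type: application/json" \
+    -d '{"org": "myorg", "role": "member", "invitees": ["docker.user-0@example.com", "docker.user-1@example.com"]}'
+```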
{{< /tab >}}
-{{< tab name="Admin Console" >}}
+{{< tab name="Docker Hub" >}}
+
+{{% include "hub-org-management.md" %}}
+
+Owners can invite new members to an organization via Docker ID, email address, or with a CSV file containing email addresses. If an invitee does not have a Docker account, they must create an account and verify their email address before they can accept an invitation to join the organization. When inviting members, their pending invitation occupies a seat.
+
+### Invite members via Docker ID or email address
+
+Use the following steps to invite members to your organization via Docker ID or email address.
-{{< include "admin-early-access.md" >}}
+1. Sign in to [Docker Hub](https://hub.docker.com).
+1. Select **My Hub**, your organization, then **Members**.
+1. Select **Invite members**.
+1. Select **Emails or usernames**.
+1. Follow the on-screen instructions to invite members. Invite a maximum of 1000 members and separate multiple entries by comma, semicolon, or space.
+
+> [!NOTE]
+>
+> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for
+> details about the access permissions for each role.
+
+Pending invitations appear in the table. The invitees receive an email with a link to Docker Hub where they can accept or decline the invitation.
+
+### Invite members via CSV file
+
+To invite multiple members to an organization via a CSV file containing email addresses:
+
+1. Sign in to [Docker Hub](https://hub.docker.com).
+1. Select **My Hub**, your organization, then **Members**.
+1. Select **Invite members**.
+1. Select **CSV upload**.
+1. Optional. Select **Download the template CSV file** to download an example CSV file. The following is an example of the contents of a valid CSV file.
+
+```text
+email
+docker.user-0@example.com
+docker.user-1@example.com
+```
+
+CSV file requirements:
+
+- The file must contain a header row with at least one heading named `email`. Additional columns are allowed and are ignored in the import.
+- The file must contain a maximum of 1000 email addresses (rows). To invite more than 1000 users, create multiple CSV files and perform all steps in this task for each file.
+
+1. Create a new CSV file or export a CSV file from another application.
+
+- To export a CSV file from another application, see the application’s documentation.
+- To create a new CSV file, open a new file in a text editor, type `email` on the first line, type the user email addresses one per line on the following lines, and then save the file with a `.csv` extension.
+
+1. Select **Browse files** and then select your CSV file, or drag and drop the CSV file into the **Select a CSV file to upload** box. You can only select one CSV file at a time.
+
+> [!NOTE]
+>
+> If the number of email addresses in your CSV file exceeds the number of available seats in your organization, you cannot continue to invite members. To invite members, you can purchase more seats, or remove some email addresses from the CSV file and re-select the new file. To purchase more seats, see [Add seats](/manuals/subscription/manage-seats.md) to your subscription or [Contact sales](https://www.docker.com/pricing/contact-sales/).
+
+1. After the CSV file has been uploaded, select **Review**.
-Valid email addresses and any email addresses that have issues appear. Email addresses may have the following issues:
+
+Valid email addresses and any email addresses that have issues appear. Email addresses might have the following issues:
+
+- Invalid email: The email address is not a valid address. The email address will be ignored if you send invites. You can correct the email address in the CSV file and re-import the file.
+- Already invited: The user has already been sent an invite email and another invite email will not be sent.
+- Member: The user is already a member of your organization and an invite email will not be sent.
+- Duplicate: The CSV file has multiple occurrences of the same email address. The user will be sent only one invite email.
+
+1. Follow the on-screen instructions to invite members.
+
+> [!NOTE]
+>
+> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for
+> details about the access permissions for each role.
+
+Pending invitations appear in the table. The invitees receive an email with a link to Docker Hub where they can accept or decline the invitation.
+
+### Invite members via API
+
+You can bulk invite members using the Docker Hub API. For more information, see the [Bulk create invites](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1bulk/post) API endpoint.
{{< /tab >}}
{{< /tabs >}}
@@ -32,17 +172,25 @@
When an invitation is to a user's email address, they receive a link to Docker Hub where they can accept or decline the invitation.
To accept an invitation:
-1. Navigate to your email inbox and open the Docker email with an invitation to
+1. Check your email inbox and open the Docker email with an invitation to
join the Docker organization.
-2. To open the link to Docker Hub, select the **click here** link.
-3. The Docker create an account page will open. If you already have an account, select **Already have an account? Sign in**.
+1. To open the link to Docker Hub, select the **click here** link.
+
+   > [!WARNING]
+   >
+   > Invitation email links expire after 14 days. If your email link has expired,
+   > you can sign in to [Docker Hub](https://hub.docker.com/) with the email
+   > address the link was sent to and accept the invitation from the
+   > **Notifications** panel.
+
+1. The Docker create an account page opens. If you already have an account, select **Already have an account? Sign in**.
If you do not have an account yet, create an account using the same email address you received the invitation through.
-4. Optional. If you do not have an account and created one, you must navigate
+1. Optional. If you created a new account, you must navigate
back to your email inbox and verify your email address using the Docker
verification email.
-5. Once you are signed in to Docker Hub, select **Organizations** from the top-level navigation menu.
-6. The organizations page will display your invitation. Select **Accept**.
+1. Once you are signed in to Docker Hub, select **My Hub** from the top-level navigation menu.
+1. Select **Accept** on your invitation.
After accepting an invitation, you are now a member of the organization.
@@ -53,27 +201,42 @@
After inviting members, you can resend or remove invitations as needed.
### Resend an invitation
{{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
-To resend an invitation from Docker Hub:
+You can resend individual or bulk invitations from the Admin Console.
-1. Sign in to [Docker Hub](https://hub.docker.com/).
-2. Select **Organizations**, your organization, and then **Members**.
-3. In the table, locate the invitee, select the **Actions** icon, and then select
-**Resend invitation**.
-4. Select **Invite** to confirm.
+To resend an individual invitation:
+
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Members**.
+1. Select the **action menu** next to the invitee and select **Resend**.
+1. Select **Invite** to confirm.
+
+To bulk resend invitations:
+
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Members**.
+1. Use the **checkboxes** next to **Usernames** to bulk select users.
+1. Select **Resend invites**.
+1. Select **Resend** to confirm.
{{< /tab >}}
-{{< tab name="Admin Console" >}}
-{{< include "admin-early-access.md" >}}
+{{< tab name="Docker Hub" >}}
+
+{{% include "hub-org-management.md" %}}
+
+To resend an invitation from Docker Hub:
-To resend an invitation from the Admin Console:
+1. Sign in to [Docker Hub](https://hub.docker.com/).
+1. Select **My Hub**, your organization, and then **Members**.
+1. In the table, locate the invitee, select the **Actions** icon, and then select
+**Resend invitation**.
+1. Select **Invite** to confirm.
-1. In the [Admin Console](https://app.docker.com/admin), select your organization.
-2. Select **Members**.
-3. Select the **action menu** next to the invitee and select **Resend invitation**.
-4. Select **Invite** to confirm.
+You can also resend an invitation using the Docker Hub API. For more information,
+see the [Resend an invite](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1%7Bid%7D~1resend/patch) API endpoint.
{{< /tab >}}
{{< /tabs >}}
@@ -81,26 +244,30 @@
### Remove an invitation
{{< tabs >}}
-{{< tab name="Docker Hub" >}}
+{{< tab name="Admin Console" >}}
-To remove a member's invitation from Docker Hub:
+To remove an invitation from the Admin Console:
-1. Sign in to [Docker Hub](https://hub.docker.com/).
-2. Select **Organizations**, your organization, and then **Members**.
-3. In the table, select the **Action** icon, and then select **Remove member** or **Remove invitee**.
-4. Follow the on-screen instructions to remove the member or invitee.
+1. 
Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Members**. +1. Select the **action menu** next to the invitee and select **Remove invitee**. +1. Select **Remove** to confirm. {{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} -To remove an invitation from the Admin Console: +To remove a member's invitation from Docker Hub: + +1. Sign in to [Docker Hub](https://hub.docker.com/). +1. Select **My Hub**, your organization, and then **Members**. +1. In the table, select the **Action** icon, and then select **Remove member** or **Remove invitee**. +1. Follow the on-screen instructions to remove the member or invitee. -1. In the [Admin Console](https://app.docker.com/admin), select your organization. -2. Select **Members**. -3. Select the **action menu** next to the invitee and select **Remove invitee**. -4. Select **Remove** to confirm. +You can also remove an invitation using the Docker Hub API. For more information, +see the [Cancel an invite](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1%7Bid%7D/delete) API endpoint. {{< /tab >}} {{< /tabs >}} @@ -112,33 +279,35 @@ Use Docker Hub or the Admin Console to add or remove team members. Organization ### Add a member to a team {{< tabs >}} -{{< tab name="Docker Hub" >}} +{{< tab name="Admin Console" >}} -To add a member to a team with Docker Hub: +To add a member to a team with the Admin Console: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Organizations**, your organization, and then **Members**. -3. Select the **Action** icon, and then select **Add to team**. - - > [!NOTE] - > - > You can also navigate to **Organizations** > **Your Organization** > **Teams** > **Your Team Name** and select **Add Member**. Select a member from the drop-down list to add them to the team or search by Docker ID or email. -4. Select the team and then select **Add**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Teams**. +1. Select the team name. +1. Select **Add member**. You can add the member by searching for their email address or username. > [!NOTE] > > An invitee must first accept the invitation to join the organization before being added to the team. {{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} -To add a member to a team with the Admin Console: +To add a member to a team with Docker Hub: -1. In the [Admin Console](https://app.docker.com/admin), select your organization. -2. Select the team name. -3. Select **Add member**. You can add the member by searching for their email address or username. +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub**, your organization, and then **Members**. +1. Select the **Action** icon, and then select **Add to team**. + + > [!NOTE] + > + > You can also navigate to **My Hub** > **Your Organization** > **Teams** > **Your Team Name** and select **Add Member**. Select a member from the drop-down list to add them to the team or search by Docker ID or email. +1. Select the team and then select **Add**. 
> [!NOTE] > @@ -147,31 +316,37 @@ To add a member to a team with the Admin Console: {{< /tab >}} {{< /tabs >}} -### Remove a member from a team +### Remove members from teams + +> [!NOTE] +> +> If your organization uses single sign-on (SSO) with [SCIM](/manuals/enterprise/security/provisioning/scim.md) enabled, you should remove members from your identity provider (IdP). This will automatically remove members from Docker. If SCIM is disabled, you must manually manage members in Docker. Organization owners can remove a member from a team in Docker Hub or Admin Console. Removing the member from the team will revoke their access to the permitted resources. {{< tabs >}} -{{< tab name="Docker Hub" >}} +{{< tab name="Admin Console" >}} -To remove a member from a specific team with Docker Hub: +To remove a member from a specific team with the Admin Console: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Organizations**, your organization, **Teams**, and then the team. -3. Select the **X** next to the user’s name to remove them from the team. -4. When prompted, select **Remove** to confirm. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Teams**. +1. Select the team name. +1. Select the **X** next to the user's name to remove them from the team. +1. When prompted, select **Remove** to confirm. {{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} -To remove a member from a specific team with the Admin Console: +To remove a member from a specific team with Docker Hub: -1. In the [Admin Console](https://app.docker.com/admin), select your organization. -2. Select the team name. -3. Select the **X** next to the user's name to remove them from the team. -4. When prompted, select **Remove** to confirm. +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub**, your organization, **Teams**, and then the team. +1. Select the **X** next to the user’s name to remove them from the team. +1. When prompted, select **Remove** to confirm. {{< /tab >}} {{< /tabs >}} @@ -184,22 +359,47 @@ the company owner can also manage that organization's roles. If you have SSO ena > [!NOTE] > -> If you're the only owner of an organization, -> you need to assign a new owner before you can edit your role. +> If you're the only owner of an organization, you need to assign a new owner +before you can edit your role. + +{{< tabs >}} +{{< tab name="Admin Console" >}} -To update a member role: +To update a member role in the Admin Console: + +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Members**. +1. Find the username of the member whose role you want to edit. Select the +**Actions** menu, then **Edit role**. + +{{< /tab >}} +{{< tab name="Docker Hub" >}} + +{{% include "hub-org-management.md" %}} + +To update a member role in Docker Hub: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Organizations**, your organization, and then **Members**. -3. Find the username of the member whose role you want to edit. In the table, select the **Actions** icon. -4. Select **Edit role**. -5. Select their organization, select the role you want to assign, and then select **Save**. +1. Select **My Hub**, your organization, and then **Members**. +1. Find the username of the member whose role you want to edit. In the table, select the **Actions** icon. +1. Select **Edit role**. +1. 
Select their organization, select the role you want to assign, and then select **Save**. + +> [!NOTE] +> +> If you're the only owner of an organization, +> you need to assign a new owner before you can edit your role. + +{{< /tab >}} +{{< /tabs >}} ## Export members CSV file {{< summary-bar feature_name="Admin orgs" >}} Owners can export a CSV file containing all members. The CSV file for a company contains the following fields: + - Name: The user's name - Username: The user's Docker ID - Email: The user's email address @@ -208,24 +408,25 @@ Owners can export a CSV file containing all members. The CSV file for a company - Account Created: The time and date when the user account was created {{< tabs >}} -{{< tab name="Docker Hub" >}} +{{< tab name="Admin Console" >}} To export a CSV file of your members: -1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Organizations**, your organization, and then **Members**. -3. Select the **Action** icon and then select **Export users as CSV**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Members**. +1. Select the **download** icon to export a CSV file of all members. {{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} To export a CSV file of your members: -1. In the [Admin Console](https://app.docker.com/admin), select your organization. -2. Select **Members**. -3. Select the **download** icon to export a CSV file of all members. +1. Sign in to [Docker Hub](https://hub.docker.com). +1. Select **My Hub**, your organization, and then **Members**. +1. Select the **Action** icon and then select **Export users as CSV**. {{< /tab >}} -{{< /tabs >}} \ No newline at end of file +{{< /tabs >}} diff --git a/content/manuals/admin/organization/onboard.md b/content/manuals/admin/organization/onboard.md index 01f81e0c19ab..0d963316e37c 100644 --- a/content/manuals/admin/organization/onboard.md +++ b/content/manuals/admin/organization/onboard.md @@ -2,9 +2,9 @@ title: Onboard your organization weight: 20 description: Get started onboarding your Docker Team or Business organization. -keywords: business, team, organizations, get started, onboarding +keywords: business, team, organizations, get started, onboarding, Admin Console, organization management, toc_min: 1 -toc_max: 2 +toc_max: 3 aliases: - /docker-hub/onboard/ - /docker-hub/onboard-team/ @@ -13,91 +13,163 @@ aliases: {{< summary-bar feature_name="Admin orgs" >}} -{{< include "admin-early-access.md" >}} +Learn how to onboard your organization using the Admin Console or Docker Hub. -Learn how to onboard your organization using Docker Hub or the Docker Admin Console. +Onboarding your organization includes: -Onboarding your organization lets administrators gain visibility into user activity and enforce security settings. In addition, members of your organization receive increased pull limits and other organization wide benefits. For more details, see [Docker subscriptions and features](../../subscription/details.md). 
- 
-In this guide, you'll learn how to do the following:
-
-- Identify your users to help you efficiently allocate your subscription seats
+- Identifying users to help you allocate your subscription seats
- Invite members and owners to your organization
-- Secure authentication and authorization for your organization using Single Sign-On (SSO) and System for Cross-domain Identity Management (SCIM)
-- Enforce sign-on for Docker Desktop to ensure security best practices
+- Secure authentication and authorization for your organization
+- Enforce sign-in for Docker Desktop to ensure security best practices
+
+These actions help administrators gain visibility into user activity and
+enforce security settings. Organization members also receive increased pull
+limits and other benefits when they are signed in.
## Prerequisites
-Before you start to onboard your organization, ensure that you:
+Before you start onboarding your organization, ensure you:
-- Have a Docker Team or Business subscription. See [Docker Pricing](https://www.docker.com/pricing/) for details.
+- Have a Docker Team or Business subscription. For more details, see
+[Docker subscriptions and features](/manuals/subscription/details.md).
> [!NOTE]
>
- > When purchasing a self-serve subscription, the on-screen instructions guide you through creating an organization. If you have purchased a subscription through Docker Sales and you have not yet created an organization, see [Create an organization](/admin/organization/orgs).
-
-- Familiarize yourself with Docker concepts and terminology in the [glossary](/glossary/) and [FAQs](/faq/admin/general-faqs/).
-
-## Step 1: Identify your Docker users
+ > When purchasing a self-serve subscription, the on-screen instructions
+ guide you through creating an organization. If you have purchased a
+ subscription through Docker Sales and you have not yet created an
+ organization, see [Create an organization](/manuals/admin/organization/orgs.md).
-Identifying your users will ensure that you allocate your subscription seats efficiently and that all your Docker users receive the benefits of your subscription.
+- Familiarize yourself with Docker concepts and terminology in
+the [administration overview](../_index.md).
-1. Identify the Docker users in your organization.
- - If your organization uses device management software, like MDM or Jamf, you may use the device management software to help identify Docker users. See your device management software's documentation for details. You can identify Docker users by checking if Docker Desktop is installed at the following location on each user's machine:
- - Mac: `/Applications/Docker.app`
- - Windows: `C:\Program Files\Docker\Docker`
- - Linux: `/opt/docker-desktop`
- - If your organization doesn't use device management software or your users haven't installed Docker Desktop yet, you may survey your users.
-2. Instruct all your organization's Docker users to update their existing Docker account's email address to an address that's in your organization's domain, or to create a new account using an email address in your organization's domain.
- - To update an account's email address, instruct your users to sign in to [Docker Hub](https://hub.docker.com), and update the email address to their email address in your organization's domain.
- - To create a new account, instruct your users to go [sign up](https://hub.docker.com/signup) using their email address in your organization's domain.
-3. 
Ask your Docker sales representative or [contact sales](https://www.docker.com/pricing/contact-sales/) to get a list of Docker accounts that use an email address in your organization's domain. +## Onboard with guided setup -## Step 2: Invite owners +The Admin Console has a guided setup to help you +onboard your organization. The guided setup's steps consist of basic onboarding +tasks. If you want to onboard outside of the guided setup, +see [Recommended onboarding steps](/manuals/admin/organization/onboard.md#recommended-onboarding-steps). -When you create an organization, you are the only owner. It is optional to add additional owners. Owners can help you onboard and manage your organization. +To onboard using the guided setup, +navigate to the [Admin Console](https://app.docker.com) and +select **Guided setup** in the left-hand navigation. -To add an owner, invite a user and assign them the owner role. For more details, see [Invite members](/admin/organization/members/). +The guided setup walks you through the following onboarding steps: -## Step 3: Invite members +- **Invite your team**: Invite owners and members. +- **Manage user access**: Add and verify a domain, manage users with SSO, and +enforce Docker Desktop sign-in. +- **Docker Desktop security**: Configure image access management, registry +access management, and settings management. -When you add users to your organization, you gain visibility into their activity and you can enforce security settings. In addition, members of your organization receive increased pull limits and other organization wide benefits. +## Recommended onboarding steps -To add a member, invite a user and assign them the member role. For more details, see [Invite members](/admin/organization/members/). +### Step one: Identify your Docker users -## Step 4: Manage members with SSO and SCIM +Identifying your users helps you allocate seats efficiently and ensures they +receive your Docker subscription benefits. -Configuring SSO and SCIM is optional and only available to Docker Business subscribers. To upgrade a Docker Team subscription to a Docker Business subscription, see [Upgrade your subscription](/subscription/upgrade/). - -You can manage your members in your identity provider and automatically provision them to your Docker organization with SSO and SCIM. See the following for more details. - - [Configure SSO](/manuals/security/for-admins/single-sign-on/configure.md) to authenticate and add members when they sign in to Docker through your identity provider. - - Optional. [Enforce SSO](/manuals/security/for-admins/single-sign-on/connect.md) to ensure that when users sign in to Docker, they must use SSO. +1. Identify the Docker users in your organization. + - If your organization uses device management software, like MDM or Jamf, + you can use the device management software to help identify Docker users. + See your device management software's documentation for details. You can + identify Docker users by checking if Docker Desktop is installed at the + following location on each user's machine: + - Mac: `/Applications/Docker.app` + - Windows: `C:\Program Files\Docker\Docker` + - Linux: `/opt/docker-desktop` + - If your organization doesn't use device management software or your + users haven't installed Docker Desktop yet, you can survey your users to + identify who is using Docker Desktop. +1. Ask users to update their Docker account's email address to one associated +with your organization's domain, or create a new account with that email. 
- To update an account's email address, instruct your users to sign in
+ to [Docker Hub](https://hub.docker.com), and update the email address to
+ their email address in your organization's domain.
+ - To create a new account, instruct your users to
+ [sign up](https://hub.docker.com/signup) using their email address associated
+ with your organization's domain.
+1. Identify Docker accounts associated with your organization's domain:
+ - Ask your Docker sales representative or
+ [contact sales](https://www.docker.com/pricing/contact-sales/) to get a list
+ of Docker accounts that use an email address in your organization's domain.
+
+### Step two: Invite owners
+
+Owners can help you onboard and manage your organization.
+
+When you create an organization, you are the only owner. It is optional to
+add additional owners.
+
+To add an owner, invite a user and assign them the owner role. For more
+details, see [Invite members](/manuals/admin/organization/members.md) and
+[Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md).
+
+### Step three: Invite members
+
+When you add users to your organization, you gain visibility into their
+activity and you can enforce security settings. Your members also
+receive increased pull limits and other organization-wide benefits when
+they are signed in.
+
+To add a member, invite a user and assign them the member role.
+For more details, see [Invite members](/manuals/admin/organization/members.md) and
+[Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md).
+
+### Step four: Manage user access with SSO and SCIM
+
+Configuring SSO and SCIM is optional and only available to Docker Business
+subscribers. To upgrade a Docker Team subscription to a Docker Business
+subscription, see [Change your subscription](/manuals/subscription/change.md).
+
+Use your identity provider (IdP) to manage members and provision them to Docker
+automatically via SSO and SCIM. See the following for more details:
+
+ - [Configure SSO](/manuals/enterprise/security/single-sign-on/configure.md)
+ to authenticate and add members when they sign in to Docker through your
+ identity provider.
+ - Optional.
+ [Enforce SSO](/manuals/enterprise/security/single-sign-on/connect.md) to
+ ensure that when users sign in to Docker, they must use SSO.
> [!NOTE]
>
- > Enforcing single sign-on (SSO) and [Step 5: Enforce sign-in for Docker
- > Desktop](#step-5-enforce-sign-in-for-docker-desktop) are different
- > features. For more details, see
- > [Enforcing sign-in versus enforcing single sign-on (SSO)](/security/for-admins/enforce-sign-in/#enforcing-sign-in-versus-enforcing-single-sign-on-sso).
+ > Enforcing single sign-on (SSO) and enforcing Docker Desktop sign-in
+ are different features. For more details, see
+ > [Enforcing sign-in versus enforcing single sign-on (SSO)](/manuals/enterprise/security/enforce-sign-in/_index.md#enforcing-sign-in-versus-enforcing-single-sign-on-sso).
- - [Configure SCIM](/security/for-admins/provisioning/scim/) to automatically provision, add, and de-provision members to Docker through your identity provider.
+ - [Configure SCIM](/manuals/enterprise/security/provisioning/scim.md) to
+ automatically provision, add, and de-provision members to Docker through
+ your identity provider.
-## Step 5: Enforce sign-in for Docker Desktop
+### Step five: Enforce sign-in for Docker Desktop
By default, members of your organization can use Docker Desktop without signing in. 
When users don’t sign in as a member of your organization, they don’t -receive the [benefits of your organization’s subscription](../../subscription/details.md) and they can circumvent [Docker’s security features](/security/for-admins/hardened-desktop/). +receive the +[benefits of your organization’s subscription](../../subscription/details.md) +and they can circumvent [Docker’s security features](/manuals/enterprise/security/hardened-desktop/_index.md). + +There are multiple ways you can enforce sign-in, depending on your organization's +Docker configuration: +- [Registry key method (Windows only)](/manuals/enterprise/security/enforce-sign-in/methods.md#registry-key-method-windows-only) +- [`.plist` method (Mac only)](/manuals/enterprise/security/enforce-sign-in/methods.md#plist-method-mac-only) +- [`registry.json` method (All)](/manuals/enterprise/security/enforce-sign-in/methods.md#registryjson-method-all) + +### Step six: Manage Docker Desktop security + +Docker offers the following security features to manage your organization's +security posture: -There are multiple ways you can enforce sign-in, depending on your company's -set up and preferences: -- [Registry key method (Windows only)](/security/for-admins/enforce-sign-in/methods/#registry-key-method-windows-only) -- [`.plist` method (Mac only)](/security/for-admins/enforce-sign-in/methods/#plist-method-mac-only) -- [`registry.json` method (All)](/security/for-admins/enforce-sign-in/methods/#registryjson-method-all) +- [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md): Control which types of images your developers can pull from Docker Hub. +- [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md): Define which registries your developers can access. +- [Settings management](/manuals/enterprise/security/hardened-desktop/settings-management.md): Set and control Docker Desktop settings for your users. ## What's next - [Manage Docker products](./manage-products.md) to configure access and view usage. - Configure [Hardened Docker Desktop](/desktop/hardened-desktop/) to improve your organization’s security posture for containerized development. -- [Audit your domains](/docker-hub/domain-audit/) to ensure that all Docker users in your domain are part of your organization. +- [Manage your domains](/manuals/enterprise/security/domain-management.md) to ensure that all Docker users in your domain are part of your organization. -Your Docker subscription provides many more additional features. To learn more, see [Docker subscriptions and features](/subscription/details/). +Your Docker subscription provides many additional features. To learn more, +see [Docker subscriptions and features](/subscription/details/). diff --git a/content/manuals/admin/organization/orgs.md b/content/manuals/admin/organization/orgs.md index ceea6a7f34db..6f125097608d 100644 --- a/content/manuals/admin/organization/orgs.md +++ b/content/manuals/admin/organization/orgs.md @@ -2,23 +2,29 @@ title: Create your organization weight: 10 description: Learn how to create an organization.
-keywords: Docker, docker, registry, teams, organizations, plans, Dockerfile, Docker - Hub, docs, documentation +keywords: docker organizations, organization, create organization, docker teams, docker admin console, organization management aliases: -- /docker-hub/orgs/ + - /docker-hub/orgs/ --- {{< summary-bar feature_name="Admin orgs" >}} -This section describes how to create an organization. Before you begin: +This page describes how to create an organization. + +## Prerequisites + +Before you begin creating an organization: - You need a [Docker ID](/accounts/create-account/) -- Review the [Docker subscriptions and features](../../subscription/details.md) to determine what plan to choose for your organization +- Review the [Docker subscriptions and features](../../subscription/details.md) + to determine what subscription to choose for your organization ## Create an organization There are multiple ways to create an organization. You can either: -- Create a new organization using the **Create Organization** option in Docker Hub + +- Create a new organization using the **Create Organization** option in the +Admin Console or Docker Hub - Convert an existing user account to an organization The following section contains instructions on how to create a new organization. For prerequisites and @@ -26,13 +32,17 @@ detailed instructions on converting an existing user account to an organization, [Convert an account into an organization](/manuals/admin/organization/convert-account.md). {{< tabs >}} -{{< tab name="Docker Hub" >}} +{{< tab name="Admin Console" >}} -1. Sign in to [Docker Hub](https://hub.docker.com/) using your Docker ID, your email address, or your social provider. -2. Select **Organizations** and then **Create Organization** to create a new organization. -3. Choose a plan for your organization, a billing cycle, and specify how many seats you need. See [Docker Pricing](https://www.docker.com/pricing/) for details on the features offered in the Team and Business plan. -4. Select **Continue to profile**. -5. Enter an **Organization namespace**. This is the official, unique name for +To create an organization: + +1. Sign in to [Docker Home](https://app.docker.com/) and navigate to the bottom +of the organization list. +1. Select **Create new organization**. +1. Choose a subscription for your organization, a billing cycle, and specify how many seats you need. See [Docker Pricing](https://www.docker.com/pricing/) for details on the features offered in the Team and Business subscription. +1. Select **Continue to profile**. +1. Select **Create an organization** to create a new one. +1. Enter an **Organization namespace**. This is the official, unique name for your organization in Docker Hub. It's not possible to change the name of the organization after you've created it. @@ -40,29 +50,26 @@ organization after you've created it. > > You can't use the same name for the organization and your Docker ID. If you want to use your Docker ID as the organization name, then you must first [convert your account into an organization](/manuals/admin/organization/convert-account.md). -6. Enter your **Company name**. This is the full name of your company. Docker +1. Enter your **Company name**. This is the full name of your company. Docker displays the company name on your organization page and in the details of any public images you publish. You can update the company name anytime by navigating to your organization's **Settings** page. -7. Select **Continue to billing** to continue. -8. 
Enter your organization's billing information and select **Continue to payment** to continue to the billing portal. -9. Provide your card details and select **Purchase**. +1. Select **Continue to billing** to continue. +1. Enter your organization's billing information and select **Continue to payment** to continue to the billing portal. +1. Provide your payment details and select **Purchase**. You've now created an organization. {{< /tab >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} +{{< tab name="Docker Hub" >}} -To create an organization: +{{% include "hub-org-management.md" %}} -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Go to Admin Console**. -3. Select the **Organization** drop-down in the left-hand navigation and then **Create Organization**. -4. Choose a plan for your organization, a billing cycle, and specify how many seats you need. See [Docker Pricing](https://www.docker.com/pricing/) for details on the features offered in the Team and Business plan. -5. Select **Continue to profile**. -6. Enter an **Organization namespace**. This is the official, unique name for +1. Sign in to [Docker Hub](https://hub.docker.com/) using your Docker ID, your email address, or your social provider. +1. Select **My Hub**, select the account drop-down, and then **Create Organization** to create a new organization. +1. Choose a subscription for your organization, a billing cycle, and specify how many seats you need. See [Docker Pricing](https://www.docker.com/pricing/) for details on the features offered in the Team and Business subscription. +1. Select **Continue to profile**. +1. Enter an **Organization namespace**. This is the official, unique name for your organization in Docker Hub. It's not possible to change the name of the organization after you've created it. @@ -70,13 +77,13 @@ organization after you've created it. > > You can't use the same name for the organization and your Docker ID. If you want to use your Docker ID as the organization name, then you must first [convert your account into an organization](/manuals/admin/organization/convert-account.md). -7. Enter your **Company name**. This is the full name of your company. Docker +1. Enter your **Company name**. This is the full name of your company. Docker displays the company name on your organization page and in the details of any public images you publish. You can update the company name anytime by navigating to your organization's **Settings** page. -8. Select **Continue to billing** to continue. -9. Enter your organization's billing information and select **Continue to payment** to continue to the billing portal. -10. Provide your card details and select **Purchase**. +1. Select **Continue to billing** to continue. +1. Enter your organization's billing information and select **Continue to payment** to continue to the billing portal. +1. Provide your payment details and select **Purchase**. You've now created an organization. @@ -86,12 +93,26 @@ You've now created an organization. ## View an organization {{< tabs >}} +{{< tab name="Admin Console" >}} + +To view an organization in the Admin Console: + +1. Sign in to [Docker Home](https://app.docker.com) and select your +organization. +1. From the left-hand navigation menu, select **Admin Console**. + +The Admin Console contains many options that let you +configure your organization.
+ +{{< /tab >}} {{< tab name="Docker Hub" >}} +{{% include "hub-org-management.md" %}} + To view an organization: -1. Sign in to [Docker Hub](https://hub.docker.com) with a user account that is a member of any team in the - organization. +1. Sign in to [Docker Hub](https://hub.docker.com) with a user account that is + a member of any team in the organization. > [!NOTE] > @@ -105,7 +126,7 @@ To view an organization: > then you are neither a member nor an owner of it. An organization > administrator needs to add you as a member of the organization. -2. Select **Organizations** in the top navigation bar, then choose your +1. Select **My Hub** in the top navigation bar, then choose your organization from the list. The organization landing page displays various options that let you @@ -113,56 +134,21 @@ configure your organization. - **Members**: Displays a list of team members. You can invite new members using the **Invite members** button. See [Manage members](./members.md) for details. - - **Teams**: Displays a list of existing teams and the number of members in each team. See [Create a team](./manage-a-team.md) for details. - - **Repositories**: Displays a list of repositories associated with the organization. See [Repositories](../../docker-hub/repos/_index.md) for detailed information about working with repositories. - - **Activity**: Displays the audit logs, a chronological list of activities that occur at organization and repository levels. It provides org owners with a report of all their team member activities. See [Audit logs](./activity-logs.md) for details. - - **Settings**: Displays information about your organization, and lets you view and change your repository privacy settings, configure org permissions such as - [Image Access Management](/manuals/security/for-admins/hardened-desktop/image-access-management.md), configure notification settings, and [deactivate](../deactivate-account.md#deactivate-an-organization) You can also update your organization name and company name that appear on your organization landing page. You must be an owner to access the - organization's **Settings** page. - + [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md), configure notification settings, and [deactivate the organization](/manuals/admin/organization/deactivate-account.md#deactivate-an-organization). You can also update your organization name and company name that appear on your organization landing page. You must be an owner to access the organization's **Settings** page. - **Billing**: Displays information about your existing -[Docker subscription (plan)](../../subscription/_index.md), including the number of seats and next payment due date. For how to access the billing history and payment methods for your organization, see [View billing history](../../billing/history.md). - -{{< /tab >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -To view an organization in the Admin Console: - -1. Sign in to [Docker Home](https://app.docker.com). -2. Under Settings and administration, select **Go to Admin Console**. -3. Select your organization from the **Organization** drop-down in the left-hand navigation. - -The Admin Console displays various options that let you to -configure your organization. - -- **Members**: Displays a list of team members. You - can invite new members using the **Invite members** button. See [Manage members](./members.md) for details.
- -- **Teams**: Displays a list of existing teams and the number of - members in each team. See [Create a team](./manage-a-team.md) for details. - -- **Activity** Displays the audit logs, a chronological list of activities that - occur at organization and repository levels. It provides the org owners a - report of all their team member activities. See [Audit logs](./activity-logs.md) for - details. - -- **Security and access**: Manage security settings. For more information, see [Security](/manuals/security/_index.md). - -- **Organization settings**: Update general settings, manage your company settings, or [deactivate your organization](/manuals/admin/deactivate-account.md). + [Docker subscription](../../subscription/_index.md), including the number of seats and next payment due date. For how to access the billing history and payment methods for your organization, see [View billing history](../../billing/history.md). {{< /tab >}} {{< /tabs >}} @@ -171,21 +157,23 @@ configure your organization. > [!WARNING] > -> If you are merging organizations, it is recommended to do so at the *end* of +> If you are merging organizations, it is recommended to do so at the _end_ of > your billing cycle. When you merge an organization and downgrade another, you > will lose seats on your downgraded organization. Docker does not offer > refunds for downgrades. -If you have multiple organizations that you want to merge into one, complete the following: +If you have multiple organizations that you want to merge into one, complete +the following steps: 1. Based on the number of seats from the secondary organization, [purchase additional seats](../../subscription/manage-seats.md) for the primary organization account that you want to keep. -2. Manually add users to the primary organization and remove existing users from the secondary organization. -3. Manually move over your data, including all repositories. -4. Once you're done moving all of your users and data, [downgrade](../../subscription/change.md) the secondary account to a free subscription. Note that Docker does not offer refunds for downgrading organizations mid-billing cycle. +1. Manually add users to the primary organization and remove existing users from the secondary organization. +1. Manually move over your data, including all repositories. +1. Once you're done moving all of your users and data, [downgrade](../../subscription/change.md) the secondary account to a free subscription. Note that Docker does not offer refunds for downgrading organizations mid-billing cycle. > [!TIP] > -> If your organization has a Docker Business subscription with a purchase order, contact Support or your Account Manager at Docker. +> If your organization has a Docker Business subscription with a purchase +order, contact Support or your Account Manager at Docker. 
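Moving repositories between organizations (step 3 above) is a manual pull, retag, and push for each image and tag. A minimal sketch for a single image, where `old-org`, `new-org`, and `my-app` are illustrative names:

```console
# Pull the image from the secondary organization
$ docker pull old-org/my-app:latest

# Retag it under the primary organization's namespace
$ docker tag old-org/my-app:latest new-org/my-app:latest

# Push it to the primary organization
$ docker push new-org/my-app:latest
```

Repeat this for every tag you need to keep. A pull and push only transfers image data, so recreate repository metadata, such as descriptions and collaborators, manually in the primary organization.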
## More resources diff --git a/content/manuals/ai/compose/_index.md b/content/manuals/ai/compose/_index.md new file mode 100644 index 000000000000..a861d426a60e --- /dev/null +++ b/content/manuals/ai/compose/_index.md @@ -0,0 +1,9 @@ +--- +build: + render: never +title: AI and Docker Compose +weight: 40 +params: + sidebar: + group: AI +--- \ No newline at end of file diff --git a/content/manuals/ai/compose/models-and-compose.md b/content/manuals/ai/compose/models-and-compose.md new file mode 100644 index 000000000000..737b4b217af2 --- /dev/null +++ b/content/manuals/ai/compose/models-and-compose.md @@ -0,0 +1,228 @@ +--- +title: Define AI Models in Docker Compose applications +linkTitle: Use AI models in Compose +description: Learn how to define and use AI models in Docker Compose applications using the models top-level element +keywords: compose, docker compose, models, ai, machine learning, cloud providers, specification +aliases: + - /compose/how-tos/model-runner/ + - /ai/compose/model-runner/ +weight: 10 +params: + sidebar: + badge: + color: green + text: New +--- + +{{< summary-bar feature_name="Compose models" >}} + +Compose lets you define AI models as core components of your application, so you can declare model dependencies alongside services and run the application on any platform that supports the Compose Specification. + +## Prerequisites + +- Docker Compose v2.38 or later +- A platform that supports Compose models such as Docker Model Runner (DMR) or compatible cloud providers. + If you are using DMR, see the [requirements](/manuals/ai/model-runner/_index.md#requirements). + +## What are Compose models? + +Compose `models` are a standardized way to define AI model dependencies in your application. By using the [`models` top-level element](/reference/compose-file/models.md) in your Compose file, you can: + +- Declare which AI models your application needs +- Specify model configurations and requirements +- Make your application portable across different platforms +- Let the platform handle model provisioning and lifecycle management + +## Basic model definition + +To define models in your Compose application, use the `models` top-level element: + +```yaml +services: + chat-app: + image: my-chat-app + models: + - llm + +models: + llm: + model: ai/smollm2 +``` + +This example defines: +- A service called `chat-app` that uses a model named `llm` +- A model definition for `llm` that references the `ai/smollm2` model image + +## Model configuration options + +Models support various configuration options: + +```yaml +models: + llm: + model: ai/smollm2 + context_size: 1024 + runtime_flags: + - "--a-flag" + - "--another-flag=42" +``` + +Common configuration options include: +- `model` (required): The OCI artifact identifier for the model. This is what Compose pulls and runs via the model runner. +- `context_size`: Defines the maximum token context size for the model. + + > [!NOTE] + > Each model has its own maximum context size. When increasing the context length, + > consider your hardware constraints. In general, try to keep context size + > as small as feasible for your specific needs. + +- `runtime_flags`: A list of raw command-line flags passed to the inference engine when the model is started. + For example, if you use llama.cpp, you can pass any of [the available parameters](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md). 
+- Platform-specific options may also be available via extension attributes `x-*` + +## Service model binding + +Services can reference models in two ways: short syntax and long syntax. + +### Short syntax + +The short syntax is the simplest way to bind a model to a service: + +```yaml +services: + app: + image: my-app + models: + - llm + - embedding-model + +models: + llm: + model: ai/smollm2 + embedding-model: + model: ai/all-minilm +``` + +With short syntax, the platform automatically generates environment variables based on the model name: +- `LLM_URL` - URL to access the llm model +- `LLM_MODEL` - Model identifier for the llm model +- `EMBEDDING_MODEL_URL` - URL to access the embedding-model +- `EMBEDDING_MODEL_MODEL` - Model identifier for the embedding-model + +### Long syntax + +The long syntax allows you to customize environment variable names: + +```yaml +services: + app: + image: my-app + models: + llm: + endpoint_var: AI_MODEL_URL + model_var: AI_MODEL_NAME + embedding-model: + endpoint_var: EMBEDDING_URL + model_var: EMBEDDING_NAME + +models: + llm: + model: ai/smollm2 + embedding-model: + model: ai/all-minilm +``` + +With this configuration, your service receives: +- `AI_MODEL_URL` and `AI_MODEL_NAME` for the LLM model +- `EMBEDDING_URL` and `EMBEDDING_NAME` for the embedding model + +## Platform portability + +One of the key benefits of using Compose models is portability across different platforms that support the Compose specification. + +### Docker Model Runner + +When [Docker Model Runner is enabled](/manuals/ai/model-runner/_index.md): + +```yaml +services: + chat-app: + image: my-chat-app + models: + llm: + endpoint_var: AI_MODEL_URL + model_var: AI_MODEL_NAME + +models: + llm: + model: ai/smollm2 + context_size: 4096 + runtime_flags: + - "--no-prefill-assistant" +``` + +Docker Model Runner will: +- Pull and run the specified model locally +- Provide endpoint URLs for accessing the model +- Inject environment variables into the service + +#### Alternative configuration with provider services + +> [!TIP] +> +> This approach is deprecated. Use the [`models` top-level element](#basic-model-definition) instead. + +You can also use the `provider` service type, which allows you to declare platform capabilities required by your application. +For AI models, you can use the `model` type to declare model dependencies. 
+ +To define a model provider: + +```yaml +services: + chat: + image: my-chat-app + depends_on: + - ai_runner + + ai_runner: + provider: + type: model + options: + model: ai/smollm2 + context-size: 1024 + runtime-flags: "--no-prefill-assistant" +``` + + +### Cloud providers + +The same Compose file can run on cloud providers that support Compose models: + +```yaml +services: + chat-app: + image: my-chat-app + models: + - llm + +models: + llm: + model: ai/smollm2 + # Cloud-specific configurations + x-cloud-options: + - "cloud.instance-type=gpu-small" + - "cloud.region=us-west-2" +``` + +Cloud providers might: +- Use managed AI services instead of running models locally +- Apply cloud-specific optimizations and scaling +- Provide additional monitoring and logging capabilities +- Handle model versioning and updates automatically + +## Reference + +- [`models` top-level element](/reference/compose-file/models.md) +- [`models` attribute](/reference/compose-file/services.md#models) +- [Docker Model Runner documentation](/manuals/ai/model-runner.md) +- [Compose Model Runner documentation](/manuals/ai/compose/models-and-compose.md) diff --git a/content/manuals/desktop/features/gordon.md b/content/manuals/ai/gordon/_index.md similarity index 58% rename from content/manuals/desktop/features/gordon.md rename to content/manuals/ai/gordon/_index.md index 469a9ac6ee62..9b0f07a07b13 100644 --- a/content/manuals/desktop/features/gordon.md +++ b/content/manuals/ai/gordon/_index.md @@ -7,41 +7,40 @@ params: badge: color: blue text: Beta + group: AI +aliases: + - /desktop/features/gordon/ --- -{{% restricted title=Beta %}} -Ask Gordon is a [Beta](/manuals/release-lifecycle.md) feature, and only members -of the Ask Gordon beta program can access it. Features, user interface, and -behavior are subject to change in future releases. - -{{< button text="Apply for access" url="https://docker.qualtrics.com/jfe/form/SV_dmVHFjQ4fZlrEOy" >}} -{{% /restricted %}} +{{< summary-bar feature_name="Ask Gordon" >}} Ask Gordon is your personal AI assistant embedded in Docker Desktop and the -Docker CLI. It's designed to streamline your workflow and help you make the -most of the Docker ecosystem. +Docker CLI. It's designed to streamline your workflow and help you make the most +of the Docker ecosystem. -## What is Ask Gordon? +## Key features -Ask Gordon is a suite of AI-powered capabilities integrated into Docker's -tools. These features, currently in Beta, are not enabled by default, and are -not production-ready. You may also encounter the term "Docker AI" as a broader -reference to this technology. +Ask Gordon provides AI-powered assistance in Docker tools. It can: + +- Improve Dockerfiles +- Run and troubleshoot containers +- Interact with your images and code +- Find vulnerabilities or configuration issues +- Migrate a Dockerfile to use [Docker Hardened Images](/manuals/dhi/_index.md) + +It understands your local environment, including source code, Dockerfiles, and +images, to provide personalized and actionable guidance. -The goal of Ask Gordon is to make Docker's tools for managing images and -containers more intuitive and accessible. It provides contextual assistance -tailored to your local environment, including Dockerfiles, containers, and -applications. +Ask Gordon remembers conversations, allowing you to switch topics more easily. -Ask Gordon integrates directly with Docker's tools to help you perform specific -tasks. It understands your local setup, such as your local source code and -images. 
For example, you can ask Gordon to help you identify vulnerabilities in -your project or how to optimize a Dockerfile in your local repository. This -tight integration ensures responses are practical and actionable. +Ask Gordon is not enabled by default, and is not +production-ready. You may also encounter the term "Docker AI" as a broader +reference to this technology. -> [!NOTE] -> Ask Gordon is powered by Large Language Models (LLMs). Like all LLM-based -> tools, its responses may sometimes be inaccurate. Always verify the +> [!NOTE] +> +> Ask Gordon is powered by Large Language Models (LLMs). Like all +> LLM-based tools, its responses may sometimes be inaccurate. Always verify the > information provided. ### What data does Gordon access? @@ -49,19 +48,21 @@ tight integration ensures responses are practical and actionable. When you use Ask Gordon, the data it accesses depends on the context of your query: -- Local files: If you use the `docker ai` command, Ask Gordon can access - files and directories in the current working directory where the command is +- Local files: If you use the `docker ai` command, Ask Gordon can access files + and directories in the current working directory where the command is executed. In Docker Desktop, if you ask about a specific file or directory in the **Ask Gordon** view, you'll be prompted to select the relevant context. -- Local images: Gordon integrates with Docker Desktop and can view all images - in your local image store. This includes images you've built or pulled from a +- Local images: Gordon integrates with Docker Desktop and can view all images in + your local image store. This includes images you've built or pulled from a registry. To provide accurate responses, Ask Gordon may send relevant files, directories, or image metadata to the Gordon backend along with your query. This data -transfer occurs over the network but is never stored persistently or shared -with third parties. It is used exclusively to process your request and -formulate a response. +transfer occurs over the network but is never stored persistently or shared with +third parties. It is used exclusively to process your request and formulate a +response. For more information about privacy terms and conditions for Docker AI, +review [Gordon's Supplemental +Terms](https://www.docker.com/legal/docker-ai-supplemental-terms/). All data transferred is encrypted in transit. @@ -86,81 +87,44 @@ making it more effective for all users. If you have concerns about data collection or usage, you can [disable](#disable-ask-gordon) the feature at any time. -## Setup - -To use this feature, you must have: - -- [Access to the Ask Gordon beta program](https://docker.qualtrics.com/jfe/form/SV_dmVHFjQ4fZlrEOy). - -- Docker Desktop version 4.37 or later. +## Enable Ask Gordon -Ask Gordon is not enabled by default. After having received access to the beta -program, you must enable the feature: - -1. [Sign in](#sign-in) to your Docker account. -2. [Enable the feature](#enable-the-feature) in the Docker Desktop settings. -3. [Accept the terms of service](#accept-the-terms-of-service). - -### Sign in - -1. Open Docker Desktop. -2. Select the **Sign in** button. -3. Complete the sign-in process in your web browser. - -### Enable the feature - -After signing in to your Docker Account, enable the Docker AI feature: - -1. Open the **Settings** view in Docker Desktop. -2. Navigate to **Features in development**. +1. Sign in to your Docker account. +2. Navigate to the **Beta features** tab in settings. 3. 
Check the **Enable Docker AI** checkbox. -4. Select **Apply & restart**. -### Accept the terms of service + The Docker AI terms of service agreement is displayed. You must agree to the + terms before you can enable the feature. Review the terms and select **Accept + and enable** to continue. -To start using Docker AI, you need to accept the terms of service. You can do -this in one of two ways: +4. Select **Apply**. -- Open the **Ask Gordon** view in Docker Desktop and ask a question. -- Use the `docker ai` CLI command to issue a query. - -The first time you interact with Docker AI, you'll see a prompt to accept the -terms of service. For example: - -```console -$ docker ai what can you do? - - Before using Gordon, please accept the terms of service -``` - -After accepting the terms, you can begin using Ask Gordon. +> [!IMPORTANT] +> +> For Docker Desktop versions 4.41 and earlier, this setting lived under the **Experimental features** tab on the **Features in development** page. ## Using Ask Gordon -The primary interfaces to Docker's AI capabilities are through the **Ask -Gordon** view in Docker Desktop, or if you prefer to use the CLI: the `docker -ai` CLI command. - -If you've used an AI chatbot before, these interfaces will be pretty familiar -to you. You can chat with the Docker AI to get help with your Docker tasks. +You can access Gordon: -### Contextual help +- In Docker Desktop, in the **Ask Gordon** view. -Once you've enabled the Docker AI features, you'll also find references to -**Ask Gordon** in various other places throughout the Docker Desktop user -interface. Whenever you encounter a button with the "sparkles" (✨) icon in the -user interface, you can use the button to get contextual support from Ask -Gordon. +- Via the Docker CLI, with the `docker ai` CLI command. +Once you've enabled the Docker AI features, you'll also find references to **Ask +Gordon** in various other places throughout the Docker Desktop user interface. +Whenever you encounter a button with the **Sparkles** (✨) icon in the user +interface, you can use the button to get contextual support from Ask Gordon. ## Example workflows Ask Gordon is a general-purpose AI assistant created to help you with all your -Docker-related tasks and workflows. If you need some inspiration, here are a -few ways things you can try: +Docker-related tasks and workflows. If you need some inspiration, here are a few +things you can try: - [Troubleshoot a crashed container](#troubleshoot-a-crashed-container) - [Get help with running a container](#get-help-with-running-a-container) - [Improve a Dockerfile](#improve-a-dockerfile) +- [Migrate a Dockerfile to DHI](#migrate-a-dockerfile-to-dhi) For more examples, try asking Gordon directly. For example: @@ -199,10 +163,10 @@ able to help you get set up: 2. Open the **Images** view in Docker Desktop and select the image. 3. Select the **Run** button. -In the _Run a new container_ dialog that opens, you should see a message about +In the **Run a new container** dialog, you should see a message about **Ask Gordon**. -![Ask Gordon hint in Docker Desktop](../images/gordon-run-ctr.png) +![Screenshot showing the Ask Gordon hint in Docker Desktop.](../../images/gordon-run-ctr.png) The linked text in the hint is a suggested prompt to start a conversation with Ask Gordon.
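You can ask the same kind of question directly from the CLI. For example, a prompt along these lines, where the image and port are illustrative:

```console
$ docker ai "How do I run the postgres image I just pulled and expose it on port 5432?"
```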
@@ -236,17 +200,33 @@ across several dimensions: - Portability - Resource efficiency +### Migrate a Dockerfile to DHI + +Migrating your Dockerfile to use [Docker Hardened Images](/manuals/dhi/_index.md) helps you build +more secure, minimal, and production-ready containers. DHIs are designed to +reduce vulnerabilities, enforce best practices, and simplify compliance, making +them a strong foundation for secure software supply chains. + +To request Gordon's help for the migration: + +{{% include "gordondhi.md" %}} + ## Disable Ask Gordon +### For individual users + If you've enabled Ask Gordon and you want to disable it again: 1. Open the **Settings** view in Docker Desktop. -2. Navigate to **Features in development**. +2. Navigate to **Beta features**. 3. Clear the **Enable Docker AI** checkbox. -4. Select **Apply & restart**. +4. Select **Apply**. + +### For organizations If you want to disable Ask Gordon for your entire Docker organization, using -[Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md), +[Settings +Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md), add the following property to your `admin-settings.json` file: ```json @@ -258,7 +238,8 @@ add the following property to your `admin-settings.json` file: } ``` -Alternatively, you can disable all Beta features by setting `allowBetaFeatures` to false: +Alternatively, you can disable all Beta features by setting `allowBetaFeatures` +to false: ```json { @@ -289,4 +270,4 @@ here's how you can get in touch: the **Ask Gordon** view in Docker Desktop, or from the CLI by running the `docker ai feedback` command. -Thank you for helping us improve Ask Gordon. + diff --git a/content/manuals/ai/gordon/images/delete.webp b/content/manuals/ai/gordon/images/delete.webp new file mode 100644 index 000000000000..e939fb150e69 Binary files /dev/null and b/content/manuals/ai/gordon/images/delete.webp differ diff --git a/content/manuals/ai/gordon/images/gordon.png b/content/manuals/ai/gordon/images/gordon.png new file mode 100644 index 000000000000..f2b65c94ca03 Binary files /dev/null and b/content/manuals/ai/gordon/images/gordon.png differ diff --git a/content/manuals/ai/gordon/images/toolbox.png b/content/manuals/ai/gordon/images/toolbox.png new file mode 100644 index 000000000000..1ee8251f7d27 Binary files /dev/null and b/content/manuals/ai/gordon/images/toolbox.png differ diff --git a/content/manuals/ai/gordon/mcp/_index.md b/content/manuals/ai/gordon/mcp/_index.md new file mode 100644 index 000000000000..ebbf14f51c62 --- /dev/null +++ b/content/manuals/ai/gordon/mcp/_index.md @@ -0,0 +1,32 @@ +--- +title: MCP +description: Learn how to use MCP servers with Gordon +keywords: ai, mcp, gordon, docker desktop, docker, llm, +grid: +- title: Built-in tools + description: Use the built-in tools. + icon: construction + link: /ai/gordon/mcp/built-in-tools +- title: MCP configuration + description: Configure MCP tools on a per-project basis. + icon: manufacturing + link: /ai/gordon/mcp/yaml +aliases: + - /desktop/features/gordon/mcp/ +--- + +## What is MCP? + +[Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) is +an open protocol that standardizes how applications provide context and extra +functionality to large language models. MCP functions as a client-server +protocol, where the client, for example an application like Gordon, sends +requests, and the server processes those requests to deliver the necessary +context to the AI. 
This context may be gathered by the MCP server by executing +some code to perform an action and getting the result of the action, calling +external APIs, etc. + +Gordon, along with other MCP clients like Claude Desktop or Cursor, can interact +with MCP servers running as containers. + +{{< grid >}} diff --git a/content/manuals/ai/gordon/mcp/built-in-tools.md b/content/manuals/ai/gordon/mcp/built-in-tools.md new file mode 100644 index 000000000000..9d253f0c445f --- /dev/null +++ b/content/manuals/ai/gordon/mcp/built-in-tools.md @@ -0,0 +1,233 @@ +--- +title: Built-in tools +description: How to use Gordon's built-in tools +keywords: ai, mcp, gordon +aliases: + - /desktop/features/gordon/mcp/built-in-tools/ +--- + +Gordon comes with an integrated toolbox providing access to various system tools +and capabilities. These tools extend Gordon's functionality by allowing it to +interact with the Docker Engine, Kubernetes, Docker Scout's security scanning, +and other developer utilities. This documentation covers the available tools, +their configuration, and usage patterns. + +## Configuration + +Tools can be configured globally in the toolbox, making them accessible +throughout the Gordon interfaces, including both Docker Desktop and the CLI. + +To configure: + +1. On the **Ask Gordon** view in Docker Desktop, select the `Toolbox` button in the bottom left of the input area. + + ![Gordon page with the toolbox button](../images/gordon.png) + +2. To enable or disable a tool, select it in the left-menu and select the toggle. + + ![Gordon's Toolbox](../images/toolbox.png) + + For more information on the available Docker tools, see [Reference](#reference). + +## Usage examples + +This section provides task-oriented examples for common operations with Gordon +tools. 
+ +### Managing Docker containers + +#### List and monitor containers + +```console +# List all running containers +$ docker ai "Show me all running containers" + +# List containers using specific resources +$ docker ai "List all containers using more than 1GB of memory" + +# View logs from a specific container +$ docker ai "Show me logs from my running api-container from the last hour" +``` + +#### Manage container lifecycle + +```console +# Run a new container +$ docker ai "Run an nginx container with port 80 exposed to localhost" + +# Stop a specific container +$ docker ai "Stop my database container" + +# Clean up unused containers +$ docker ai "Remove all stopped containers" +``` + +### Working with Docker images + +```console +# List available images +$ docker ai "Show me all my local Docker images" + +# Pull a specific image +$ docker ai "Pull the latest Ubuntu image" + +# Build an image from a Dockerfile +$ docker ai "Build an image from my current directory and tag it as myapp:latest" + +# Clean up unused images +$ docker ai "Remove all my unused images" +``` + +### Managing Docker volumes + +```console +# List volumes +$ docker ai "List all my Docker volumes" + +# Create a new volume +$ docker ai "Create a new volume called postgres-data" + +# Back up data from a container to a volume +$ docker ai "Create a backup of my postgres container data to a new volume" +``` + +### Kubernetes operations + +```console +# Create a deployment +$ docker ai "Create an nginx deployment and make sure it's exposed locally" + +# List resources +$ docker ai "Show me all deployments in the default namespace" + +# Get logs +$ docker ai "Show me logs from the auth-service pod" +``` + +### Security analysis + + +```console +# Scan for CVEs +$ docker ai "Scan my application for security vulnerabilities" + +# Get security recommendations +$ docker ai "Give me recommendations for improving the security of my nodejs-app image" +``` + +### Development workflows + +```console +# Analyze and commit changes +$ docker ai "Look at my local changes, create multiple commits with sensible commit messages" + +# Review branch status +$ docker ai "Show me the status of my current branch compared to main" +``` + +## Reference + +This section provides a comprehensive listing of the built-in tools you can find +in Gordon's toolbox. + +### Docker tools + +Tools to interact with your Docker containers, images, and volumes. + +#### Container management + +| Tool | Description | +|---------------|----------------------------------------| +| `docker` | Access to the Docker CLI | +| `list_builds` | List the builds in the Docker daemon | +| `build_logs` | Show the build logs. 
| +#### Volume management + +| Tool | Description | +|------|-------------| +| `list_volumes` | List all Docker volumes | +| `remove_volume` | Remove a Docker volume | +| `create_volume` | Create a new Docker volume | + +#### Image management + +| Tool | Description | +|------|-------------| +| `list_images` | List all Docker images | +| `remove_images` | Remove Docker images | +| `pull_image` | Pull an image from a registry | +| `push_image` | Push an image to a registry | +| `build_image` | Build a Docker image | +| `tag_image` | Tag a Docker image | +| `inspect` | Inspect a Docker object | + +### Kubernetes tools + +Tools to interact with your Kubernetes cluster. + +#### Pods + +| Tool | Description | +|------|-------------| +| `list_pods` | List all pods in the cluster | +| `get_pod_logs` | Get logs from a specific pod | + +#### Deployment management + + +| Tool | Description | +|------|-------------| +| `list_deployments` | List all deployments | +| `create_deployment` | Create a new deployment | +| `expose_deployment` | Expose a deployment as a service | +| `remove_deployment` | Remove a deployment | + +#### Service management + +| Tool | Description | +|------|-------------| +| `list_services` | List all services | +| `remove_service` | Remove a service | + +#### Cluster information + +| Tool | Description | +|------|-------------| +| `list_namespaces` | List all namespaces | +| `list_nodes` | List all nodes in the cluster | + +### Docker Scout tools + +Security analysis tools powered by Docker Scout. + +| Tool | Description | +|------|-------------| +| `search_for_cves` | Analyze a Docker image, a project directory, or other artifacts for vulnerabilities using Docker Scout. | +| `get_security_recommendations` | Analyze a Docker image, a project directory, or other artifacts for base image update recommendations using Docker Scout. | + +### Developer tools + +General-purpose development utilities. + +| Tool | Description | +|------|-------------| +| `fetch` | Retrieve content from a URL | +| `get_command_help` | Get help for CLI commands | +| `run_command` | Execute shell commands | +| `filesystem` | Perform filesystem operations | +| `git` | Execute git commands | + +### AI model tools + +| Tool | Description | +|------|-------------| +| `list_models` | List all available Docker models | +| `pull_model` | Download a Docker model | +| `run_model` | Query a model with a prompt | +| `remove_model` | Remove a Docker model | + +### Docker MCP Catalog + +If you have enabled the [MCP Toolkit feature](../../mcp-catalog-and-toolkit/_index.md), +all the tools you have enabled and configured are available for Gordon to use. diff --git a/content/manuals/ai/gordon/mcp/yaml.md b/content/manuals/ai/gordon/mcp/yaml.md new file mode 100644 index 000000000000..806e2aaa7f65 --- /dev/null +++ b/content/manuals/ai/gordon/mcp/yaml.md @@ -0,0 +1,134 @@ +--- +title: YAML configuration +description: Learn how to use MCP servers with Gordon +keywords: ai, mcp, gordon +aliases: + - /desktop/features/gordon/mcp/yaml/ +--- + +Docker has partnered with Anthropic to build container images for the [reference +implementations](https://github.com/modelcontextprotocol/servers/) of MCP +servers available on Docker Hub under [the mcp +namespace](https://hub.docker.com/u/mcp).
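These servers are distributed as ordinary Docker images, so you can pull one ahead of time like any other image. For example, for the time server used in the example below:

```console
$ docker pull mcp/time
```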
When you run the `docker ai` command in your terminal to ask a question, Gordon +looks in the `gordon-mcp.yml` file in your working directory (if present) for a +list of MCP servers that should be used in that context. The +`gordon-mcp.yml` file is a Docker Compose file that configures MCP servers as +Compose services for Gordon to access. + +The following minimal example shows how you can use the [mcp-time +server](https://hub.docker.com/r/mcp/time) to provide temporal capabilities to +Gordon. For more information, you can check out the [source code and +documentation](https://github.com/modelcontextprotocol/servers/tree/main/src/time). + +Create the `gordon-mcp.yml` file in your working directory and add the time + server: + +```yaml +services: + time: + image: mcp/time +``` + +With this file present, you can now ask Gordon to tell you the time in + another timezone: + + ```bash + $ docker ai 'what time is it now in kiribati?' + + • Calling get_current_time + + The current time in Kiribati (Tarawa) is 9:38 PM on January 7, 2025. + + ``` + +As you can see, Gordon found the MCP time server and called its tool when +needed. + +## Advanced usage + +Some MCP servers need access to your filesystem or system environment variables. +Docker Compose can help with this. Since `gordon-mcp.yml` is a Compose file, you +can add bind mounts using the regular Docker Compose syntax, which makes your +filesystem resources available to the container: + +```yaml +services: + fs: + image: mcp/filesystem + command: + - /rootfs + volumes: + - .:/rootfs +``` + +The `gordon-mcp.yml` file adds filesystem access capabilities to Gordon, and +since everything runs inside a container, Gordon only has access to the +directories you specify. + +Gordon can handle any number of MCP servers. For example, if you give Gordon +access to the internet with the `mcp/fetch` server: + +```yaml +services: + fetch: + image: mcp/fetch + fs: + image: mcp/filesystem + command: + - /rootfs + volumes: + - .:/rootfs +``` + +You can now ask things like: + +```bash +$ docker ai can you fetch rumpl.dev and write the summary to a file test.txt + + • Calling fetch ✔️ + • Calling write_file ✔️ + + The summary of the website rumpl.dev has been successfully written to the file test.txt in the allowed directory. Let me know if you need further assistance! + + +$ cat test.txt +The website rumpl.dev features a variety of blog posts and articles authored by the site owner. Here's a summary of the content: + +1. **Wasmio 2023 (March 25, 2023)**: A recap of the WasmIO 2023 conference held in Barcelona. The author shares their experience as a speaker and praises the organizers for a successful event. + +2. **Writing a Window Manager in Rust - Part 2 (January 3, 2023)**: The second part of a series on creating a window manager in Rust. This installment focuses on enhancing the functionality to manage windows effectively. + +3. **2022 in Review (December 29, 2022)**: A personal and professional recap of the year 2022. The author reflects on the highs and lows of the year, emphasizing professional achievements. + +4. **Writing a Window Manager in Rust - Part 1 (December 28, 2022)**: The first part of the series on building a window manager in Rust. The author discusses setting up a Linux machine and the challenges of working with X11 and Rust. + +5. **Add docker/docker to your dependencies (May 10, 2020)**: A guide for Go developers on how to use the Docker client library in their projects.
The post includes a code snippet demonstrating the integration. + +6. **First (October 11, 2019)**: The inaugural post on the blog, featuring a simple "Hello World" program in Go. +``` + +## What’s next? + +Now that you’ve learned how to use MCP servers with Gordon, here are a few ways +you can get started: + +- Experiment: Try integrating one or more of the tested MCP servers into your + `gordon-mcp.yml` file and explore their capabilities. +- Explore the ecosystem: Check out the [reference implementations on + GitHub](https://github.com/modelcontextprotocol/servers/) or browse the + [Docker Hub MCP namespace](https://hub.docker.com/u/mcp) for additional + servers that might suit your needs. +- Build your own: If none of the existing servers meet your needs, or you’re + curious about exploring how they work in more detail, consider developing a + custom MCP server. Use the [MCP + specification](https://www.anthropic.com/news/model-context-protocol) as a + guide. +- Share your feedback: If you discover new servers that work well with Gordon + or encounter issues with existing ones, [share your findings to help improve + the ecosystem](https://docker.qualtrics.com/jfe/form/SV_9tT3kdgXfAa6cWa). + +With MCP support, Gordon offers powerful extensibility and flexibility to meet +your specific use cases whether you’re adding temporal awareness, file +management, or internet access. diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/_index.md b/content/manuals/ai/mcp-catalog-and-toolkit/_index.md new file mode 100644 index 000000000000..c91713b27fc5 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/_index.md @@ -0,0 +1,52 @@ +--- +title: Docker MCP Catalog and Toolkit +linkTitle: MCP Catalog and Toolkit +params: + sidebar: + group: AI + badge: + color: blue + text: Beta +weight: 30 +description: Learn about Docker's MCP catalog on Docker Hub +keywords: Docker, ai, mcp servers, ai agents, extension, docker desktop, llm, docker hub +grid: + - title: MCP Catalog + description: Learn about the benefits of the MCP Catalog, how you can use it, and how you can contribute + icon: hub + link: /ai/mcp-catalog-and-toolkit/catalog/ + - title: MCP Toolkit + description: Learn about the MCP toolkit to manage MCP servers and clients + icon: /icons/toolkit.svg + link: /ai/mcp-catalog-and-toolkit/toolkit/ +--- + +The Model Context Protocol (MCP) is a modern standard that transforms AI agents from passive responders into action-oriented systems. By standardizing how tools are described, discovered, and invoked, MCP enables agents to securely query APIs, access data, and execute services across diverse environments. + +As agents move into production, MCP solves common integration challenges — interoperability, reliability, and security — by providing a consistent, decoupled, and scalable interface between agents and tools. Just as containers redefined software deployment, MCP is reshaping how AI systems interact with the world. + +> **Example** +> +> In simple terms, an MCP server is a way for an LLM to interact with an external system. +> +> For example: +> If you ask a model to create a meeting, it needs to communicate with your calendar app to do that. +> An MCP server for your calendar app provides _tools_ that perform atomic actions, such as: +> "getting the details of a meeting" or "creating a new meeting". + +## What is Docker MCP Catalog and Toolkit? + +Docker MCP Catalog and Toolkit is a comprehensive solution for securely building, sharing, and running MCP tools. 
It simplifies the developer experience across these key areas: + +- Discovery: A central catalog with verified, versioned tools +- Credential Management: OAuth-based and secure by default +- Execution: Tools run in isolated, containerized environments +- Portability: Use MCP tools across Claude, Cursor, VS Code, and more — no code changes needed + +With Docker Hub and the MCP Toolkit, you can: + +- Launch MCP servers in seconds +- Add tools via CLI or GUI +- Rely on Docker's pull-based infrastructure for trusted delivery + +{{< grid >}} diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md b/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md new file mode 100644 index 000000000000..7526a7833fb1 --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/catalog.md @@ -0,0 +1,55 @@ +--- +title: Docker MCP Catalog +description: Learn about the benefits of the MCP Catalog, how you can use it, and how you can contribute +keywords: docker hub, mcp, mcp servers, ai agents, catalog, docker +--- + +The [Docker MCP Catalog](https://hub.docker.com/mcp) is a centralized, trusted registry for discovering, sharing, and running MCP-compatible tools. Seamlessly integrated into Docker Hub, it offers verified, versioned, and curated MCP servers packaged as Docker images. The catalog is also available in Docker Desktop. + +The catalog solves common MCP server challenges: + +- Environment conflicts: Tools often need specific runtimes that may clash with existing setups. +- Lack of isolation: Traditional setups risk exposing the host system. +- Setup complexity: Manual installation and configuration result in slow adoption. +- Inconsistency across platforms: Tools may behave unpredictably on different OSes. + +With Docker, each MCP server runs as a self-contained container so it is +portable, isolated, and consistent. You can launch tools instantly using Docker +CLI or Docker Desktop, without worrying about dependencies or compatibility. + +## Key features + +- Over 100 verified MCP servers in one place +- Publisher verification and versioned releases +- Pull-based distribution using Docker's infrastructure +- Tools provided by partners such as New Relic, Stripe, Grafana, and more + +## How it works + +Each tool in the MCP Catalog is packaged as a Docker image with metadata: + +- Discover tools via Docker Hub under the `mcp/` namespace. +- Connect tools to their preferred agents with simple configuration through the [MCP Toolkit](toolkit.md). +- Pull and run tools using Docker Desktop or the CLI. + +Each catalog entry displays: + +- Tool description and metadata +- Version history +- List of tools provided by the MCP server +- Example configuration for agent integration + +## Use an MCP server from the catalog + +To use an MCP server from the catalog, see [MCP toolkit](toolkit.md). + +## Contribute an MCP server to the catalog + +The MCP server registry is available at https://github.com/docker/mcp-registry. To submit an MCP server, +follow the [contributing guidelines](https://github.com/docker/mcp-registry/blob/main/CONTRIBUTING.md). 
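Submitting a server follows a standard GitHub pull request flow against that repository. A rough sketch, assuming you've already forked it; the branch name and entry path are illustrative, and the required entry format is described in the contributing guidelines:

```console
# Clone your fork and create a branch for the new entry
$ git clone https://github.com/<your-username>/mcp-registry.git
$ cd mcp-registry
$ git checkout -b add-my-server

# Add your server entry as described in CONTRIBUTING.md, then commit and push
$ git add .
$ git commit -m "Add my-server to the registry"
$ git push origin add-my-server
```

Then open a pull request from your branch against `docker/mcp-registry`.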
When your pull request is reviewed and approved, your MCP server is available within 24 hours on: + +- Docker Desktop's [MCP Toolkit feature](toolkit.md) +- The [Docker MCP catalog](https://hub.docker.com/mcp) +- The [Docker Hub](https://hub.docker.com/u/mcp) `mcp` namespace (for MCP servers built by Docker) diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md b/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md new file mode 100644 index 000000000000..0ece647c8c2e --- /dev/null +++ b/content/manuals/ai/mcp-catalog-and-toolkit/hub-mcp.md @@ -0,0 +1,234 @@ +--- +title: Docker Hub MCP server +description: Use the MCP Toolkit to set up MCP servers and MCP clients. +keywords: Docker Hub MCP Server, Hub MCP server, Hub MCP +--- + +The Docker Hub MCP Server is a Model Context Protocol (MCP) server that interfaces with Docker Hub APIs to make rich image metadata accessible to LLMs, enabling intelligent content discovery and repository management. Developers building with containers, especially in AI and LLM-powered workflows, often face inadequate context across the vast landscape of Docker Hub images. As a result, LLMs struggle to recommend the right images, and developers lose time manually searching instead of building. + +## Key features + +- Advanced LLM context: Our MCP Server provides LLMs with detailed, structured context for Docker Hub images, enabling smarter, more relevant recommendations for developers, whether they're choosing a base image or automating CI/CD workflows. +- Natural language image discovery: Developers can find the right container image using natural language, with no need to remember tags or repository names. Just describe what you need, and Docker Hub will return images that match your intent. +- Simplified repository management: Hub MCP Server enables agents to manage repositories through natural language: fetching image details, viewing stats, searching content, and performing key operations quickly and easily. + +## Install Docker Hub MCP server + +1. From the **MCP Toolkit** menu, select the **Catalog** tab, search for **Docker Hub**, and select the plus icon to add the Docker Hub MCP server. +1. In the server's **Configuration** tab, insert your Docker Hub username and personal access token (PAT). +1. In the **Clients** tab in MCP Toolkit, ensure Gordon is connected. +1. From the **Ask Gordon** menu, you can now send requests related to your + Docker Hub account, using the tools provided by the Docker Hub MCP server. To test it, ask Gordon: + + ```text + What repositories are in my namespace? + ``` + +> [!TIP] +> By default, the Gordon [client](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md#install-an-mcp-client) is enabled, +> which means Gordon can automatically interact with your MCP servers. + +## Using other clients + +If you want to integrate the Docker Hub MCP Server into your own development environment, you can find the source code and installation instructions on our [GitHub repository](https://github.com/docker/hub-mcp). + +## Use Claude Desktop as a client + +1. 
+1. Add the Docker Hub MCP Server configuration to your `claude_desktop_config.json`:
+
+### For public repositories only
+
+   Replace the following value:
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` - The complete path to where you cloned this repository
+
+```json
+{
+  "mcpServers": {
+    "docker-hub": {
+      "command": "node",
+      "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"]
+    }
+  }
+}
+```
+
+### For authenticated access
+
+   Replace the following values:
+   - `YOUR_DOCKER_HUB_USERNAME` - Your Docker Hub username
+   - `YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN` - Your Docker Hub Personal Access Token
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` - The complete path to where you cloned this repository
+
+```json
+{
+  "mcpServers": {
+    "docker-hub": {
+      "command": "node",
+      "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio", "--username=YOUR_DOCKER_HUB_USERNAME"],
+      "env": {
+        "HUB_PAT_TOKEN": "YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN"
+      }
+    }
+  }
+}
+```
+
+1. Save the configuration file and completely restart Claude Desktop for the changes to take effect.
+
+## Usage with VS Code
+
+1. Add the Docker Hub MCP Server configuration to your User Settings (JSON) file in VS Code. You can do this by opening the `Command Palette` and typing `Preferences: Open User Settings (JSON)`.
+
+### For public repositories only
+
+   Replace the following value:
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` - The complete path to where you cloned this repository
+
+```json
+{
+  "mcpServers": {
+    "docker-hub": {
+      "command": "node",
+      "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"]
+    }
+  }
+}
```
+
+### For authenticated access
+
+   Replace the following values:
+   - `YOUR_DOCKER_HUB_USERNAME` - Your Docker Hub username
+   - `YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN` - Your Docker Hub Personal Access Token
+   - `/FULL/PATH/TO/YOUR/docker-hub-mcp-server` - The complete path to where you cloned this repository
+
+```json
+{
+  "mcpServers": {
+    "docker-hub": {
+      "command": "node",
+      "args": ["/FULL/PATH/TO/YOUR/docker-hub-mcp-server/dist/index.js", "--transport=stdio"],
+      "env": {
+        "HUB_USERNAME": "YOUR_DOCKER_HUB_USERNAME",
+        "HUB_PAT_TOKEN": "YOUR_DOCKER_HUB_PERSONAL_ACCESS_TOKEN"
+      }
+    }
+  }
+}
+```
+
+1. Open the `Command Palette` and type `MCP: List Servers`.
+1. Select `docker-hub` and select `Start Server`.
+
+## Usage examples
+
+This section provides task-oriented examples for common operations with Docker Hub
+tools.
+
+### Finding images
+
+```console
+# Search for official images
+$ docker ai "Search for official nginx images on Docker Hub"
+
+# Search for lightweight images to reduce deployment size and improve performance
+$ docker ai "Search for minimal Node.js images with small footprint"
+
+# Get the most recent tag of a base image
+$ docker ai "Show me the latest tag details for go"
+
+# Find a production-ready database with enterprise features and reliability
+$ docker ai "Search for production ready database images"
+
+# Compare Ubuntu versions to choose the right one for my project
+$ docker ai "Help me find the right Ubuntu version for my project"
+```
+
+### Repository management
+
+```console
+# Create a repository
+$ docker ai "Create a repository in my namespace"
+
+# List all repositories in my namespace
+$ docker ai "List all repositories in my namespace"
+
+# Find the largest repository in my namespace
+$ docker ai "Which of my repositories takes up the most space?"
+
+# Find repositories that haven't been updated recently
+$ docker ai "Which of my repositories haven't had any pushes in the last 60 days?"
+
+# Find which repositories are currently active and being used
+$ docker ai "Show me my most recently updated repositories"
+
+# Get details about a repository
+$ docker ai "Show me information about my '<repository-name>' repository"
+```
+
+### Pull and push images
+
+```console
+# Pull latest PostgreSQL version
+$ docker ai "Pull the latest postgres image"
+
+# Push image to your Docker Hub repository
+$ docker ai "Push my '<image-name>' image to my '<repository-name>' repository"
+```
+
+### Tag management
+
+```console
+# List all tags for a repository
+$ docker ai "Show me all tags for my '<repository-name>' repository"
+
+# Find the most recently pushed tag
+$ docker ai "What's the most recent tag pushed to my '<repository-name>' repository?"
+
+# List tags with architecture filtering
+$ docker ai "List tags in the '<repository-name>' repository that support amd64 architecture"
+
+# Get detailed information about a specific tag
+$ docker ai "Show me details about the '<tag>' tag in the '<repository-name>' repository"
+
+# Check if a specific tag exists
+$ docker ai "Check if version 'v1.2.0' exists for my 'my-web-app' repository"
+```
+
+### Docker Hardened Images
+
+```console
+# List available hardened images
+$ docker ai "What is the most secure image I can use to run a node.js application?"
+
+# Convert a Dockerfile to use a hardened image
+$ docker ai "Can you help me update my Dockerfile to use a Docker Hardened Image instead of the current one"
+```
+
+> [!NOTE]
+> You need a subscription to access Docker Hardened Images. If you're interested in using Docker Hardened Images, visit [Docker Hardened Images](https://www.docker.com/products/hardened-images/).
+
+## Reference
+
+This section provides a comprehensive list of the tools available in
+the Docker Hub MCP Server.
+
+### Docker Hub MCP server tools
+
+Tools to interact with your Docker repositories and discover content on Docker Hub.
+
+| Name | Description |
+|------|-------------|
+| `check-repository` | Check repository |
+| `check-repository-tag` | Check repository tag |
+| `check-repository-tags` | Check repository tags |
+| `create-repository` | Creates a new repository |
+| `docker-hardened-images` | Lists available [Docker Hardened Images](https://www.docker.com/products/hardened-images/) in specified namespace |
+| `get-namespaces` | Get organizations/namespaces for a user |
+| `get-repository-dockerfile` | Gets the Dockerfile for a repository |
+| `get-repository-info` | Gets repository info |
+| `list-repositories-by-namespace` | Lists repositories under namespace |
+| `list-repository-tags` | List repository tags |
+| `read-repository-tag` | Read repository tag |
+| `search` | Search content on Docker Hub |
+| `set-repository-dockerfile` | Sets the Dockerfile for a repository |
+| `update-repository-info` | Updates repository info |
diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png b/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png
new file mode 100644
index 000000000000..9ce6e961c5c6
Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/copilot-mode.png differ
diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png b/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png
new file mode 100644
index 000000000000..4439dc4b5e1f
Binary files /dev/null and b/content/manuals/ai/mcp-catalog-and-toolkit/images/tools.png differ
diff --git a/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md b/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md
new file mode 100644
index 000000000000..6a0696443df9
--- /dev/null
+++ b/content/manuals/ai/mcp-catalog-and-toolkit/toolkit.md
@@ -0,0 +1,231 @@
+---
+title: MCP Toolkit
+description: Use the MCP Toolkit to set up MCP servers and MCP clients.
+keywords: Docker MCP Toolkit, MCP server, MCP client, AI agents
+aliases:
+  - /desktop/features/gordon/mcp/gordon-mcp-server/
+  - /ai/gordon/mcp/gordon-mcp-server/
+---
+
+The Docker MCP Toolkit is a gateway that enables seamless setup, management, and execution of containerized MCP servers and their connections to AI agents. It removes the friction from tool usage by offering secure defaults, one-click setup, and support for a growing ecosystem of LLM-based clients. It is the fastest path from MCP tool discovery to local execution.
+
+## Key features
+
+- Cross-LLM compatibility: Instantly works with Claude Desktop, Cursor, Continue.dev, and [Gordon](/manuals/ai/gordon/_index.md).
+- Integrated tool discovery: Browse and launch MCP servers from the Docker MCP Catalog directly in Docker Desktop.
+- Zero manual setup: No dependency management, runtime configuration, or server setup required.
+- Functions as both an MCP server aggregator and a gateway for clients to access installed MCP servers.
+
+## How the MCP toolkit works
+
+MCP introduces two core concepts: MCP clients and MCP servers.
+
+- MCP clients are typically embedded in LLM-based applications, such as
+  the Claude Desktop App. They request resources or actions.
+- MCP servers are launched by the client to perform the requested tasks,
+  using any necessary tools, languages, or processes.
+
+Docker standardizes the development, packaging, and distribution of
+applications, including MCP servers. By packaging MCP servers as containers,
+Docker eliminates issues related to isolation and environment differences.
+You can run a container directly, without managing dependencies or configuring
+runtimes.
+
+Depending on the MCP server, the tools it provides might run within the same container
+as the server or in dedicated containers:
+
+{{< tabs group="" >}}
+{{< tab name="Single container">}}
+
+![Visualization of the MCP Toolkit with a single container](/assets/images/mcp_servers.png)
+
+{{< /tab >}}
+{{< tab name="Separate containers">}}
+
+![Visualization of the MCP Toolkit with separate containers](/assets/images/mcp_servers_2.png)
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Security
+
+The Docker MCP Toolkit combines passive and active measures to reduce attack
+surfaces and ensure safe runtime behavior.
+
+### Passive security
+
+- Image signing and attestation: All MCP server images under `mcp/` in the [catalog](catalog.md)
+  are built by Docker and digitally
+  signed to verify their source and integrity. Each image includes a Software
+  Bill of Materials (SBOM) for full transparency.
+
+### Active security
+
+Security at runtime is enforced through resource and access limitations:
+
+- CPU allocation: MCP tools run in their own container. They are
+  restricted to one CPU, limiting the impact of potential misuse of computing
+  resources.
+
+- Memory allocation: Containers for MCP tools are limited to 2 GB of memory.
+
+- Filesystem access: By default, MCP servers have no access to the host filesystem.
+  You explicitly select the servers that are granted file mounts.
+
+- Interception of tool requests: Requests to and from tools that contain sensitive
+  information such as secrets are blocked.
+
+## Enable Docker MCP Toolkit
+
+1. Open the Docker Desktop settings and select **Beta features**.
+1. Select **Enable Docker MCP Toolkit**.
+1. Select **Apply**.
+
+> [!NOTE]
+>
+> This feature started as the MCP Toolkit _extension_. This extension is now deprecated
+> and should be uninstalled.
+
+## Install an MCP server
+
+To install an MCP server:
+
+1. In Docker Desktop, select **MCP Toolkit** and select the **Catalog** tab.
+   When you select a server, you can see the following
+   information:
+
+   - Tool name and description
+   - Partner/publisher
+   - The list of callable tools the server provides.
+
+1. Find the MCP server of your choice and select the **Plus** icon.
+1. Optional: Some servers require extra configuration. To configure them, select
+   the **Config** tab and follow the instructions in the MCP server provider's repository.
+
+> [!TIP]
+> By default, the Gordon [client](#install-an-mcp-client) is enabled,
+> which means Gordon can automatically interact with your MCP servers.
+
+To learn more about the MCP server catalog, see [Catalog](catalog.md).
+
+### Example: Use the GitHub Official MCP server
+
+Imagine you want to enable Ask Gordon to interact with your GitHub account:
+
+1. From the **MCP Toolkit** menu, select the **Catalog** tab and find
+   the **GitHub Official** server and add it.
+1. In the server's **Config** tab, [connect via OAuth](#authenticate-via-oauth).
+1. In the **Clients** tab, ensure Gordon is connected.
+1. From the **Ask Gordon** menu, you can now send requests related to your
+   GitHub account, in accordance with the tools provided by the GitHub Official server. To test it, ask Gordon:
+
+   ```text
+   What's my GitHub handle?
+   ```
+
+   Make sure to allow Gordon to interact with GitHub by selecting **Always allow** in Gordon's answer.
+
+## Install an MCP client
+
+When you have installed MCP servers, you can add clients to the MCP Toolkit.
+These clients can interact with the installed MCP servers, turning the MCP Toolkit into a gateway.
+
+To install a client:
+
+1. In Docker Desktop, select **MCP Toolkit** and select the **Clients** tab.
+1. Find the client of your choice and select **Connect**.
+
+Your client can now interact with the MCP Toolkit.
+
+### Example: Use Claude Desktop as a client
+
+Imagine you have Claude Desktop installed and you want to use the GitHub MCP server
+and the Puppeteer MCP server. You do not have to install the servers in Claude Desktop.
+You can simply install these two MCP servers in the MCP Toolkit,
+and add Claude Desktop as a client:
+
+1. From the **MCP Toolkit** menu, select the **Catalog** tab and find the **Puppeteer** server and add it.
+1. Repeat for the **GitHub Official** server.
+1. From the **Clients** tab, select **Connect** next to **Claude Desktop**. Restart
+   Claude Desktop if it's running, and it can now access all the servers in the MCP Toolkit.
+1. Within Claude Desktop, run a test by submitting the following prompt using the Sonnet 3.5 model:
+
+   ```text
+   Take a screenshot of docs.docker.com and then invert the colors
+   ```
+
+### Example: Use Visual Studio Code as a client
+
+You can interact with all your installed MCP servers in VS Code:
+
+1. To enable the MCP Toolkit:
+
+   {{< tabs group="" >}}
+   {{< tab name="Enable globally">}}
+
+   1. Insert the following in your VS Code User `settings.json` file:
+
+      ```json
+      "mcp": {
+        "servers": {
+          "MCP_DOCKER": {
+            "command": "docker",
+            "args": [
+              "mcp",
+              "gateway",
+              "run"
+            ],
+            "type": "stdio"
+          }
+        }
+      }
+      ```

+
+   {{< /tab >}}
+   {{< tab name="Enable for a given project">}}
+
+   1. In your terminal, navigate to your project's folder.
+   1. Run:
+
+      ```bash
+      docker mcp client connect vscode
+      ```
+
+      > [!NOTE]
+      > This command creates a `.vscode/mcp.json` file in the current directory. We
+      > recommend you add it to your `.gitignore` file.
+
+   {{< /tab >}}
+   {{< /tabs >}}
+
+1. In Visual Studio Code, open a new chat and select **Agent** mode:
+
+   ![Screenshot showing Copilot mode switching.](./images/copilot-mode.png)
+
+1. You can also check the available MCP tools:
+
+   ![Screenshot showing the list of MCP tools in VS Code.](./images/tools.png)
+
+For more information about Agent mode, see the
+[Visual Studio Code documentation](https://code.visualstudio.com/docs/copilot/chat/mcp-servers#_use-mcp-tools-in-agent-mode).
+
+## Authenticate via OAuth
+
+You can connect the MCP Toolkit to your development workflow via
+OAuth integration. For now, the MCP Toolkit only supports GitHub OAuth.
+
+1. On https://github.com/, ensure you are signed in.
+1. In Docker Desktop, select **MCP Toolkit** and select the **OAuth** tab.
+1. In the GitHub entry, select **Authorize**. Your browser opens the GitHub authorization page.
+1. In the GitHub authorization page, select **Authorize Docker**. Once the authorization
+   is successful, you are automatically redirected to Docker Desktop.
+1. Install the **GitHub Official** MCP server. See [Install an MCP server](#install-an-mcp-server).
+
+The MCP Toolkit now has access to your GitHub account. To revoke access, select **Revoke** in the **OAuth** tab.
+See an example in [Use the GitHub Official MCP server](#example-use-the-github-official-mcp-server).
+
+## Related pages
+
+- [Open-source MCP Gateway](/manuals/ai/mcp-gateway/_index.md)
diff --git a/content/manuals/ai/mcp-gateway/_index.md b/content/manuals/ai/mcp-gateway/_index.md
new file mode 100644
index 000000000000..36fbac551a51
--- /dev/null
+++ b/content/manuals/ai/mcp-gateway/_index.md
@@ -0,0 +1,110 @@
+---
+title: MCP Gateway
+description: "Docker's MCP Gateway provides secure, centralized, and scalable orchestration of AI tools through containerized MCP servers—empowering developers, operators, and security teams."
+keywords: MCP Gateway
+params:
+  sidebar:
+    group: Open source
+---
+
+The MCP Gateway is Docker's open-source, enterprise-ready solution for orchestrating and
+managing [Model Context Protocol (MCP)](https://spec.modelcontextprotocol.io/) servers
+securely across development and production environments.
+It is designed to help organizations connect MCP servers from the [Docker MCP Catalog](https://hub.docker.com/mcp) to MCP clients without compromising security, visibility, or control.
+
+By unifying multiple MCP servers into a single, secure endpoint, the MCP Gateway offers
+the following benefits:
+
+- Secure by default: MCP servers run in isolated Docker containers with restricted
+  privileges, network access, and resource usage.
+- Unified management: One gateway endpoint centralizes configuration, credentials,
+  and access control for all MCP servers.
+- Enterprise observability: Built-in monitoring, logging, and filtering tools ensure
+  full visibility and governance of AI tool activity.
+
+## Who is the MCP Gateway designed for?
+
+The MCP Gateway solves problems encountered by various groups:
+
+- Developers: Deploy MCP servers locally and in production using Docker Compose,
+  with built-in support for protocol handling, credential management, and security policies.
+- Security teams: Achieve enterprise-grade isolation and visibility into AI tool
+  behavior and access patterns.
+- Operators: Scale effortlessly from local development environments to production
+  infrastructure with consistent, low-touch operations.
+
+## Key features
+
+- Server management: List, inspect, and call MCP tools, resources, and prompts from multiple servers
+- Container-based servers: Run MCP servers as Docker containers with proper isolation
+- Secrets management: Secure handling of API keys and credentials via Docker Desktop
+- Dynamic discovery and reloading: Automatic tool, prompt, and resource discovery from running servers
+- Monitoring: Built-in logging and call tracing capabilities
+
+## Install a pre-release version of the MCP Gateway
+
+If you use Docker Desktop, the MCP Gateway is readily available. Use the
+following instructions to test pre-release versions.
+
+### Prerequisites
+
+- Docker Desktop with the [MCP Toolkit feature enabled](../mcp-catalog-and-toolkit/toolkit.md#enable-docker-mcp-toolkit).
+- Go 1.24+ (for development)
+
+### Install using a pre-built binary
+
+You can download the latest binary from the [GitHub releases page](https://github.com/docker/mcp-gateway/releases/latest).
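+
+For example, on macOS or Linux, the end-to-end install might look like the
+following sketch. The release asset name is illustrative; take the actual name
+for your platform from the releases page, and see the table below for the
+per-OS destination folders:
+
+```console
+# Download a release binary (the asset name is illustrative)
+$ curl -fsSL -o docker-mcp \
+    "https://github.com/docker/mcp-gateway/releases/latest/download/<asset-name>"
+
+# Install it as a Docker CLI plugin for the current user
+$ mkdir -p "$HOME/.docker/cli-plugins"
+$ mv docker-mcp "$HOME/.docker/cli-plugins/docker-mcp"
+$ chmod +x "$HOME/.docker/cli-plugins/docker-mcp"
+
+# Verify that Docker picks up the plugin
+$ docker mcp --help
+```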
+
+Rename the relevant binary and copy it to the destination matching your OS:
+
+| OS | Binary name | Destination folder |
+|---------|---------------------|-------------------------------------|
+| Linux | `docker-mcp` | `$HOME/.docker/cli-plugins` |
+| macOS | `docker-mcp` | `$HOME/.docker/cli-plugins` |
+| Windows | `docker-mcp.exe` | `%USERPROFILE%\.docker\cli-plugins` |
+
+Alternatively, copy it into one of these folders to install it system-wide:
+
+{{< tabs group="" >}}
+{{< tab name="On Unix environments">}}
+
+* `/usr/local/lib/docker/cli-plugins` or `/usr/local/libexec/docker/cli-plugins`
+* `/usr/lib/docker/cli-plugins` or `/usr/libexec/docker/cli-plugins`
+
+> [!NOTE]
+> You might have to make the binaries executable with `chmod +x`:
+>
+> ```bash
+> $ chmod +x ~/.docker/cli-plugins/docker-mcp
+> ```
+
+{{< /tab >}}
+{{< tab name="On Windows">}}
+
+* `C:\ProgramData\Docker\cli-plugins`
+* `C:\Program Files\Docker\cli-plugins`
+
+{{< /tab >}}
+{{< /tabs >}}
+
+You can now use the `mcp` command:
+
+```bash
+docker mcp --help
+```
+
+## Use the MCP Gateway
+
+Run:
+
+```bash
+docker mcp gateway run
+```
+
+To view all the commands and configuration options, go to the [mcp-gateway repository](https://github.com/docker/mcp-gateway?tab=readme-ov-file#usage).
+
+## Related pages
+
+- [Docker MCP toolkit and catalog](/manuals/ai/mcp-catalog-and-toolkit/_index.md)
diff --git a/content/manuals/ai/model-runner/_index.md b/content/manuals/ai/model-runner/_index.md
new file mode 100644
index 000000000000..5e6ecdc0a9ba
--- /dev/null
+++ b/content/manuals/ai/model-runner/_index.md
@@ -0,0 +1,460 @@
+---
+title: Docker Model Runner
+params:
+  sidebar:
+    badge:
+      color: blue
+      text: Beta
+    group: AI
+weight: 20
+description: Learn how to use Docker Model Runner to manage and run AI models.
+keywords: Docker, ai, model runner, docker desktop, docker engine, llm
+aliases:
+  - /desktop/features/model-runner/
+  - /model-runner/
+---
+
+{{< summary-bar feature_name="Docker Model Runner" >}}
+
+Docker Model Runner makes it easy to manage, run, and
+deploy AI models using Docker. Designed for developers,
+Docker Model Runner streamlines the process of pulling, running, and serving
+large language models (LLMs) and other AI models directly from Docker Hub or any
+OCI-compliant registry.
+
+With seamless integration into Docker Desktop and Docker
+Engine, you can serve models via OpenAI-compatible APIs, package GGUF files as
+OCI Artifacts, and interact with models from both the command line and graphical
+interface.
+
+Whether you're building generative AI applications, experimenting with machine
+learning workflows, or integrating AI into your software development lifecycle,
+Docker Model Runner provides a consistent, secure, and efficient way to work
+with AI models locally.
+
+## Key features
+
+- [Pull and push models to and from Docker Hub](https://hub.docker.com/u/ai)
+- Serve models on OpenAI-compatible APIs for easy integration with existing apps
+- Package GGUF files as OCI Artifacts and publish them to any Container Registry
+- Run and interact with AI models directly from the command line or from the Docker Desktop GUI
+- Manage local models and display logs
+
+## Requirements
+
+Docker Model Runner is supported on the following platforms:
+
+{{< tabs >}}
+{{< tab name="Windows">}}
+
+Windows (amd64):
+- NVIDIA GPUs
+- NVIDIA drivers 576.57+
+
+Windows (arm64):
+- OpenCL for Adreno
+- Qualcomm Adreno GPU (6xx series and later)
+
+  > [!NOTE]
+  > Some llama.cpp features might not be fully supported on the 6xx series.
+
+{{< /tab >}}
+{{< tab name="macOS">}}
+
+- Apple Silicon
+
+{{< /tab >}}
+{{< tab name="Linux">}}
+
+Docker Engine only:
+
+- Linux CPU and Linux NVIDIA
+- NVIDIA drivers 575.57.08+
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## How it works
+
+Models are pulled from Docker Hub the first time they're used and stored locally. They're loaded into memory only at runtime when a request is made, and unloaded when not in use to optimize resources. Since models can be large, the initial pull might take some time, but after that, they're cached locally for faster access. You can interact with the model using [OpenAI-compatible APIs](#what-api-endpoints-are-available).
+
+> [!TIP]
+>
+> Using Testcontainers or Docker Compose?
+> [Testcontainers for Java](https://java.testcontainers.org/modules/docker_model_runner/)
+> and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/), and
+> [Docker Compose](/manuals/ai/compose/models-and-compose.md) now support Docker Model Runner.
+
+## Enable Docker Model Runner
+
+### Enable DMR in Docker Desktop
+
+1. In the settings view, navigate to the **Beta features** tab.
+1. Select the **Enable Docker Model Runner** setting.
+1. If you are running on Windows with a supported NVIDIA GPU, you can also select the **Enable GPU-backed inference** setting.
+1. Optional: If you want to enable TCP support, select the **Enable host-side TCP support** setting.
+   1. In the **Port** field, type the port of your choice.
+   1. If you are interacting with Model Runner from a local frontend web app,
+      in **CORS Allows Origins**, select the origins that Model Runner should accept requests from.
+      An origin is the URL where your web app is running, for example `http://localhost:3131`.
+
+You can now use the `docker model` command in the CLI and view and interact with your local models in the **Models** tab in the Docker Desktop Dashboard.
+
+> [!IMPORTANT]
+>
+> For Docker Desktop versions 4.41 and earlier, this setting lived under the **Experimental features** tab on the **Features in development** page.
+
+### Enable DMR in Docker Engine
+
+1. Ensure you have installed [Docker Engine](/engine/install/).
+1. DMR is available as a package. To install it, run:
+
+   {{< tabs >}}
+   {{< tab name="Ubuntu/Debian">}}
+
+   ```console
+   $ sudo apt-get update
+   $ sudo apt-get install docker-model-plugin
+   ```
+
+   {{< /tab >}}
+   {{< tab name="RPM-based distributions">}}
+
+   ```console
+   $ sudo dnf update
+   $ sudo dnf install docker-model-plugin
+   ```
+
+   {{< /tab >}}
+   {{< /tabs >}}
+
+1. Test the installation:
+
+   ```console
+   $ docker model version
+   $ docker model run ai/smollm2
+   ```
+
+> [!NOTE]
+> TCP support is enabled by default for Docker Engine on port `12434`.
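+
+To confirm that the runner is reachable over TCP, you can query one of its
+OpenAI-compatible endpoints, which are described in more detail in
+[the FAQs](#what-api-endpoints-are-available):
+
+```console
+# Lists the models available to Docker Model Runner
+$ curl http://localhost:12434/engines/llama.cpp/v1/models
+```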
+
+### Update DMR in Docker Engine
+
+To update Docker Model Runner in Docker Engine, uninstall it with [`docker model uninstall-runner`](/reference/cli/docker/model/uninstall-runner/) then reinstall it:
+
+```console
+$ docker model uninstall-runner --images && docker model install-runner
+```
+
+> [!NOTE]
+> With this command, local models are preserved.
+> To delete the models during the upgrade, add the `--models` option to the `uninstall-runner` command.
+
+## Pull a model
+
+Models are cached locally.
+
+> [!NOTE]
+>
+> When working with the Docker CLI, you can also pull models directly from [HuggingFace](https://huggingface.co/).
+
+{{< tabs group="release" >}}
+{{< tab name="From Docker Desktop">}}
+
+1. Select **Models** and select the **Docker Hub** tab.
+1. Find the model of your choice and select **Pull**.
+
+![Screenshot showing the Docker Hub tab of the Models view.](./images/dmr-catalog.png)
+
+{{< /tab >}}
+{{< tab name="From the Docker CLI">}}
+
+Use the [`docker model pull` command](/reference/cli/docker/model/pull/). For example:
+
+```bash {title="Pulling from Docker Hub"}
+docker model pull ai/smollm2:360M-Q4_K_M
+```
+
+```bash {title="Pulling from HuggingFace"}
+docker model pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF
+```
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Run a model
+
+{{< tabs group="release" >}}
+{{< tab name="From Docker Desktop">}}
+
+1. Select **Models** and select the **Local** tab.
+1. Select the play button. The interactive chat screen opens.
+
+![Screenshot showing the Local tab of the Models view.](./images/dmr-run.png)
+
+{{< /tab >}}
+{{< tab name="From the Docker CLI" >}}
+
+Use the [`docker model run` command](/reference/cli/docker/model/run/).
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Troubleshooting
+
+To troubleshoot potential issues, display the logs:
+
+{{< tabs group="release" >}}
+{{< tab name="From Docker Desktop">}}
+
+Select **Models** and select the **Logs** tab.
+
+![Screenshot showing the Logs tab of the Models view.](./images/dmr-logs.png)
+
+{{< /tab >}}
+{{< tab name="From the Docker CLI">}}
+
+Use the [`docker model logs` command](/reference/cli/docker/model/logs/).
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Publish a model
+
+> [!NOTE]
+>
+> This works for any Container Registry supporting OCI Artifacts, not only Docker Hub.
+
+You can tag existing models with a new name and publish them under a different namespace and repository:
+
+```console
+# Tag a pulled model under a new name
+$ docker model tag ai/smollm2 myorg/smollm2
+
+# Push it to Docker Hub
+$ docker model push myorg/smollm2
+```
+
+For more details, see the [`docker model tag`](/reference/cli/docker/model/tag) and [`docker model push`](/reference/cli/docker/model/push) command documentation.
+
+You can also directly package a model file in GGUF format as an OCI Artifact and publish it to Docker Hub.
+
+```console
+# Download a model file in GGUF format, for example from HuggingFace
+$ curl -L -o model.gguf https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf
+
+# Package it as an OCI Artifact and push it to Docker Hub
+$ docker model package --gguf "$(pwd)/model.gguf" --push myorg/mistral-7b-v0.1:Q4_K_M
+```
+
+For more details, see the [`docker model package`](/reference/cli/docker/model/package/) command documentation.
+
+## Example: Integrate Docker Model Runner into your software development lifecycle
+
+You can now start building your generative AI application powered by Docker Model Runner.
+
+If you want to try an existing GenAI application, follow these instructions.
+
+1. Set up the sample app. Clone and run the following repository:
+
+   ```console
+   $ git clone https://github.com/docker/hello-genai.git
+   ```
+
+1. In your terminal, navigate to the `hello-genai` directory.
+
+1. Run `run.sh` to pull the chosen model and run the apps.
+
+1. Open your app in the browser at the addresses specified in the repository [README](https://github.com/docker/hello-genai).
+
+You'll see the GenAI app's interface where you can start typing your prompts.
+
+You can now interact with your own GenAI app, powered by a local model. Try a few prompts and notice how fast the responses are, all running on your machine with Docker.
+
+## FAQs
+
+### What models are available?
+
+All the available models are hosted in the [public Docker Hub namespace of `ai`](https://hub.docker.com/u/ai).
+
+### What CLI commands are available?
+
+See [the reference docs](/reference/cli/docker/model/).
+
+### What API endpoints are available?
+
+Once the feature is enabled, new API endpoints are available under the following base URLs:
+
+{{< tabs >}}
+{{< tab name="Docker Desktop">}}
+
+- From containers: `http://model-runner.docker.internal/`
+- From host processes: `http://localhost:12434/`, assuming TCP host access is
+  enabled on the default port (12434).
+
+{{< /tab >}}
+{{< tab name="Docker Engine">}}
+
+- From containers: `http://172.17.0.1:12434/` (with `172.17.0.1` representing the host gateway address)
+- From host processes: `http://localhost:12434/`
+
+> [!NOTE]
+> The `172.17.0.1` interface might not be available by default to containers
+> within a Compose project.
+> In this case, add an `extra_hosts` directive to your Compose service YAML:
+>
+> ```yaml
+> extra_hosts:
+>   - "model-runner.docker.internal:host-gateway"
+> ```
+> Then you can access the Docker Model Runner APIs at http://model-runner.docker.internal:12434/
+
+{{< /tab >}}
+{{< /tabs >}}
+
+Docker Model management endpoints:
+
+```text
+POST /models/create
+GET /models
+GET /models/{namespace}/{name}
+DELETE /models/{namespace}/{name}
+```
+
+OpenAI endpoints:
+
+```text
+GET /engines/llama.cpp/v1/models
+GET /engines/llama.cpp/v1/models/{namespace}/{name}
+POST /engines/llama.cpp/v1/chat/completions
+POST /engines/llama.cpp/v1/completions
+POST /engines/llama.cpp/v1/embeddings
+```
+
+To call these endpoints via a Unix socket (`/var/run/docker.sock`), prefix their path
+with `/exp/vDD4.40`.
+
+> [!NOTE]
+> You can omit `llama.cpp` from the path. For example: `POST /engines/v1/chat/completions`.
+
+### How do I interact through the OpenAI API?
+
+#### From within a container
+
+To call the `chat/completions` OpenAI endpoint from within another container using `curl`:
+
+```bash
+#!/bin/sh
+
+curl http://model-runner.docker.internal/engines/llama.cpp/v1/chat/completions \
+    -H "Content-Type: application/json" \
+    -d '{
+        "model": "ai/smollm2",
+        "messages": [
+            {
+                "role": "system",
+                "content": "You are a helpful assistant."
+            },
+            {
+                "role": "user",
+                "content": "Please write 500 words about the fall of Rome."
+            }
+        ]
+    }'
+```
+
+#### From the host using TCP
+
+To call the `chat/completions` OpenAI endpoint from the host via TCP:
+
+1. Enable the host-side TCP support from the Docker Desktop GUI, or via the [Docker Desktop CLI](/manuals/desktop/features/desktop-cli.md).
+   For example: `docker desktop enable model-runner --tcp <port>`.
+
+   If you are running on Windows, also enable GPU-backed inference.
+   See [Enable Docker Model Runner](#enable-dmr-in-docker-desktop).
+
+1. Interact with it as documented in the previous section using `localhost` and the correct port.
+
+   ```bash
+   #!/bin/sh
+
+   curl http://localhost:12434/engines/llama.cpp/v1/chat/completions \
+       -H "Content-Type: application/json" \
+       -d '{
+           "model": "ai/smollm2",
+           "messages": [
+               {
+                   "role": "system",
+                   "content": "You are a helpful assistant."
+               },
+               {
+                   "role": "user",
+                   "content": "Please write 500 words about the fall of Rome."
+               }
+           ]
+       }'
+   ```
+
+#### From the host using a Unix socket
+
+To call the `chat/completions` OpenAI endpoint through the Docker socket from the host using `curl`:
+
+```bash
+#!/bin/sh
+
+curl --unix-socket $HOME/.docker/run/docker.sock \
+    localhost/exp/vDD4.40/engines/llama.cpp/v1/chat/completions \
+    -H "Content-Type: application/json" \
+    -d '{
+        "model": "ai/smollm2",
+        "messages": [
+            {
+                "role": "system",
+                "content": "You are a helpful assistant."
+            },
+            {
+                "role": "user",
+                "content": "Please write 500 words about the fall of Rome."
+            }
+        ]
+    }'
+```
+
+## Known issues
+
+### `docker model` is not recognized
+
+If you run a Docker Model Runner command and see:
+
+```text
+docker: 'model' is not a docker command
+```
+
+It means Docker can't find the plugin because it's not in the expected CLI plugins directory.
+
+To fix this, create a symlink so Docker can detect it:
+
+```console
+$ ln -s /Applications/Docker.app/Contents/Resources/cli-plugins/docker-model ~/.docker/cli-plugins/docker-model
+```
+
+Once linked, rerun the command.
+
+### No safeguard for running oversized models
+
+Currently, Docker Model Runner doesn't include safeguards to prevent you from
+launching models that exceed your system's available resources. Attempting to
+run a model that is too large for the host machine might result in severe
+slowdowns or render the system temporarily unusable. This issue is
+particularly common when running LLMs without sufficient GPU memory or system
+RAM.
+
+### No consistent digest support in Model CLI
+
+The Docker Model CLI currently lacks consistent support for specifying models by image digest. As a temporary workaround, you should refer to models by name instead of digest.
+
+## Share feedback
+
+Thanks for trying out Docker Model Runner. Give feedback or report any bugs you might find through the **Give feedback** link next to the **Enable Docker Model Runner** setting.
diff --git a/content/manuals/ai/model-runner/images/dmr-catalog.png b/content/manuals/ai/model-runner/images/dmr-catalog.png
new file mode 100644
index 000000000000..15d8bd04df11
Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-catalog.png differ
diff --git a/content/manuals/ai/model-runner/images/dmr-logs.png b/content/manuals/ai/model-runner/images/dmr-logs.png
new file mode 100644
index 000000000000..e2b2289e9886
Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-logs.png differ
diff --git a/content/manuals/ai/model-runner/images/dmr-run.png b/content/manuals/ai/model-runner/images/dmr-run.png
new file mode 100644
index 000000000000..c12b3bd5fdd4
Binary files /dev/null and b/content/manuals/ai/model-runner/images/dmr-run.png differ
diff --git a/content/manuals/billing/3d-secure.md b/content/manuals/billing/3d-secure.md
index 32f815c8768b..028da65d26b7 100644
--- a/content/manuals/billing/3d-secure.md
+++ b/content/manuals/billing/3d-secure.md
@@ -7,7 +7,7 @@ weight: 40
 > [!NOTE]
 >
-> [Docker plan](../subscription/setup.md) payments support 3D secure authentication.
+> [Docker subscription](../subscription/setup.md) payments support 3D Secure authentication.
 
 3D Secure (3DS) authentication incorporates an additional security layer for credit card transactions. If you’re making payments for your Docker billing in a region that requires 3DS, or using a payment method that requires 3DS, you’ll need to verify your identity to complete any transactions. The method used to verify your identity varies depending on your banking institution.
diff --git a/content/manuals/billing/_index.md b/content/manuals/billing/_index.md
index 0e2ea0f91cae..5429670c876f 100644
--- a/content/manuals/billing/_index.md
+++ b/content/manuals/billing/_index.md
@@ -12,7 +12,7 @@ grid_core:
   description: Learn how to add or update a payment method for your personal account or organization.
   link: /billing/payment-method/
   icon: credit_score
-- title: Update the billing information
+- title: Update billing information
   description: Discover how to update the billing information for your personal account or organization.
   link: /billing/details/
   icon: contract_edit
@@ -32,8 +32,10 @@ grid_core:
   description: Discover how Docker billing supports 3DS and how to troubleshoot potential issues.
   link: /billing/3d-secure/
   icon: wallet
+aliases:
+  - /billing/docker-hub-pricing/
 ---
 
-Use the resources in this section to manage your billing and payment settings for your Docker subscription plans.
+Use the resources in this section to manage your billing and payment settings for your Docker subscriptions.
 
 {{< grid items="grid_core" >}}
diff --git a/content/manuals/billing/cycle.md b/content/manuals/billing/cycle.md
index 9e461d054cb8..439a9c90d159 100644
--- a/content/manuals/billing/cycle.md
+++ b/content/manuals/billing/cycle.md
@@ -5,13 +5,15 @@ description: Learn to change your billing cycle for your Docker subscription
 keywords: billing, cycle, payments, subscription
 ---
 
-You can pay for a subscription plan on a monthly or yearly billing cycle. You select your preferred billing cycle when you buy your subscription.
+You can pay for a subscription on a monthly or yearly billing cycle. You select
+your preferred billing cycle when you buy your subscription.
 
 > [!NOTE]
 >
-> Business plan is available only on yearly billing cycle.
+> Business subscriptions are available only on a yearly billing cycle.
 
-If you have a monthly billing cycle, you can choose to switch to an annual billing cycle.
+If you have a monthly billing cycle, you can choose to switch to an annual
+billing cycle.
 
 > [!NOTE]
 >
@@ -19,38 +21,54 @@ If you have a monthly billing cycle, you can choose to switch to an annual billi
 
 When you change the billing cycle's duration:
 
-- The next billing date reflects the new cycle. To find your next billing date, see [View renewal date](history.md#view-renewal-date).
-- The subscription's start date resets. For example, if the start date of the monthly subscription is March 1st and the end date is April 1st, then after switching the billing duration to March 15th, 2024 the new start date is March 15th, 2024, and the new end date is March 15th, 2025.
-- Any unused monthly subscription is prorated and applied as credit towards the new annual period. For example, if you switch from a $10 monthly subscription to a $100 annual plan, deducting the unused monthly value (in this case $5), the migration cost becomes $95 ($100 - $5). The renewal cost after March 15, 2025 is $100.
+- The next billing date reflects the new cycle.
+To find your next billing date,
+see [View renewal date](history.md#view-renewal-date).
+- The subscription's start date resets. For example, if the start date of the
+monthly subscription is March 1st and the end date is April 1st, then after
+switching the billing duration to March 15th, 2024, the new start date is March
+15th, 2024, and the new end date is March 15th, 2025.
+- Any unused monthly subscription is prorated and applied as credit towards the
+new annual period. For example, if you switch from a $10 monthly subscription to
+a $100 annual subscription, deducting the unused monthly value
+(in this case $5), the migration cost becomes $95 ($100 - $5). The renewal cost
+after March 15, 2025 is $100.
 
-{{< include "tax-compliance.md" >}}
+{{% include "tax-compliance.md" %}}
 
 ## Personal account
 
 {{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}
 
 To change your billing cycle:
 
-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. On the plans and usage page, select **Switch to annual billing**.
-4. Verify your billing information.
-5. Select **Continue to payment**.
-6. Verify payment information and select **Upgrade subscription**.
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+your organization.
+1. Select **Billing**.
+1. On the plans and usage page, select **Switch to annual billing**.
+1. Verify your billing information.
+1. Select **Continue to payment**.
+1. Verify payment information and select **Upgrade subscription**.
 
-The billing plans and usage page will now reflect your new annual plan details.
+> [!NOTE]
+>
+> If you choose to pay using a US bank account, you must verify the account. For
+> more information, see
+> [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account).
+
+The billing plans and usage page will now reflect your new annual subscription
+details.
 
 {{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}
 
 To change your billing cycle:
 
 1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select your avatar in the top-right corner.
-3. From the drop-down menu select **Billing**.
-4. In the bottom-right of the **Plan** tab, select **Switch to annual billing**.
-5. Review the information displayed on the **Change to an Annual subscription** page and select **Accept Terms and Purchase** to confirm.
+1. Select your organization, then select **Billing**.
+1. In the bottom-right of the **Plan** tab, select **Switch to annual billing**.
+1. Review the information displayed on the **Change to an Annual subscription**
+page and select **Accept Terms and Purchase** to confirm.
 
 {{< /tab >}}
 {{< /tabs >}}
@@ -62,27 +80,34 @@ To change your billing cycle:
 > You must be an organization owner to make changes to the payment information.
 
 {{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}
 
 To change your organization's billing cycle:
 
-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. On the plans and usage page, select **Switch to annual billing**.
-4. Verify your billing information.
-5. Select **Continue to payment**.
-6. Verify payment information and select **Upgrade subscription**.
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+your organization.
+1. Select **Billing**.
+1. On the plans and usage page, select **Switch to annual billing**.
+1. Verify your billing information.
+1. Select **Continue to payment**.
+1. Verify payment information and select **Upgrade subscription**.
+
+> [!NOTE]
+>
+> If you choose to pay using a US bank account, you must verify the account. For
+> more information, see
+> [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account).
 
 {{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}
 
 To change your organization's billing cycle:
 
 1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select **Organizations** from the top-level navigation.
-3. Select the organization that you want to change the payment method for.
-4. In the bottom-right of the **Plan** tab, select **Switch to annual billing**.
-5. Review the information displayed on the **Change to an Annual subscription** page and select **Accept Terms and Purchase** to confirm.
+1. Select your organization, then select **Billing**.
+1. Select **Switch to annual billing**.
+1. Review the information displayed on the **Change to an Annual subscription**
+page and select **Accept Terms and Purchase** to confirm.
 
 {{< /tab >}}
 {{< /tabs >}}
\ No newline at end of file
diff --git a/content/manuals/billing/details.md b/content/manuals/billing/details.md
index d3cee9477e6d..f88b1a8545c6 100644
--- a/content/manuals/billing/details.md
+++ b/content/manuals/billing/details.md
@@ -9,33 +9,48 @@ You can update the billing information for your personal account or for an organ
 
 The billing information provided appears on all your billing invoices. The email address provided is where Docker sends all invoices and other [billing-related communication](#update-your-billing-invoice-email-address).
 
-{{< include "tax-compliance.md" >}}
+{{% include "tax-compliance.md" %}}
 
 ## Manage billing information
 
 ### Personal account
 
 {{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}
 
 To update your billing information:
 
-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. Select **Billing information** from the left-hand navigation.
-4. On your billing information card, select **Change**.
-5. Update your billing contact and billing address information, then select **Update**.
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Billing**.
+1. Select **Billing information** from the left-hand navigation.
+1. On your billing information card, select **Change**.
+1. Update your billing contact and billing address information.
+1. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID.
+
+   > [!IMPORTANT]
+   >
+   > Your VAT number must include your country prefix.
For example, if you are + entering a VAT number for Germany, you would enter `DE123456789`. + +1. Select **Submit**. {{< /tab >}} {{< /tabs >}} @@ -47,28 +62,41 @@ To update your billing information: > You must be an organization owner to make changes to the billing information. {{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}} To update your billing information: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact and billing address information, then select **Update**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact and billing address information. +1. Optional. To add or update a VAT ID, select the **I'm purchasing as a business** checkbox and enter your Tax ID. + + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, if you are + entering a VAT number for Germany, you would enter `DE123456789`. + +1. Select **Update**. {{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}} To update your billing information: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the organization that you want to change the payment method for. -5. Select **Billing Address**. -6. Enter your updated billing information. -7. Select **Submit**. +1. Select your organization, then select **Billing**. +1. Select **Billing Address**. +1. Optional. To add or update a VAT ID, enter your **Tax ID/VAT**. + + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, if you are + entering a VAT number for Germany, you would enter `DE123456789`. + +1. Select **Submit**. {{< /tab >}} {{< /tabs >}} @@ -82,34 +110,35 @@ Docker sends the following billing-related emails: - Notifications of credit or debit card payment failures. - Notifications of credit or debit card expiration. - Confirmation of a cancelled subscription -- Reminders of subscription renewals for annual subscribers. This is sent 14 days before the renewal date. +- Reminders of subscription renewals for annual subscribers. This is sent 14 +days before the renewal date. You can update the email address that receives billing invoices at any time. ### Personal account {{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}} To update your billing email address: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact information and select **Update**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact information and select **Update**. 
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}} To update your billing email address: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select **Billing Address**. -5. Update the email address in the **Billing contact** section. -6. Select **Submit**. +1. Select your organization, then select **Billing**. +1. Select **Billing Address**. +1. Update the email address in the **Billing contact** section. +1. Select **Submit**. {{< /tab >}} {{< /tabs >}} @@ -117,28 +146,28 @@ To update your billing email address: ### Organizations {{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}} To update your billing email address: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Billing information** from the left-hand navigation. -4. On your billing information card, select **Change**. -5. Update your billing contact information and select **Update**. +1. Sign in to [Docker Home](https://app.docker.com/) and select +your organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand navigation. +1. On your billing information card, select **Change**. +1. Update your billing contact information and select **Update**. {{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}} To update your billing email address: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the name of the organization. -5. Select **Billing Address**. -6. Update the email address in the **Billing contact** section. -7. Select **Submit**. +1. Select your organization, then select **Billing**. +1. Select the name of the organization. +1. Select **Billing Address**. +1. Update the email address in the **Billing contact** section. +1. Select **Submit**. {{< /tab >}} {{< /tabs >}} diff --git a/content/manuals/billing/docker-hub-pricing.md b/content/manuals/billing/docker-hub-pricing.md deleted file mode 100644 index 9b97802cde7e..000000000000 --- a/content/manuals/billing/docker-hub-pricing.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Docker Hub storage pricing -description: Learn how Docker Hub storage pricing is calculated -keywords: docker hub, storage, payments, billing, subscription -weight: 55 ---- - -This guide explains how Docker Hub storage is measured, calculated, and billed -to help you understand your storage consumption and costs. - -> [!NOTE] -> -> Docker Hub plan limits will take effect on March 1, 2025. No charge on Docker Hub image pulls -or storage will be incurred from December 10, 2024 and February 28, 2025. - -## How storage is measured - -Docker Hub measures storage using: -- Hourly measurement: Storage usage is recorded every hour and expressed in **GB-hours**. This value represents the total storage your repositories consume during each hour. -- Monthly aggregation: At the end of each month, hourly storage usage is divided by the number of hours in that month. - - For example, 900 GB-hours of storage in a 30-day month (720 hours) equates to 900 / 720 = 1.25 gigabytes of monthly storage (GB-month). - -## How storage is calculated - -Docker subscription plans include a specific amount of allocated -private repository storage: - -- Personal plan: Includes up to 2GB of storage. 
-- Pro plan: Includes up to 5GB of storage. -- Team plan: Includes up to 50GB of storage. -- Business plan: Includes up to 500GB of storage. - -Docker Hub determines additional charges based on your average monthly usage of private repository storage. - -If you go over your allocated private repository storage, you will incur overage -costs. To calculate overage costs the included storage for your plan is subtracted from your average monthly -usage. - -## Docker Hub consumption pricing - -At the end of the month, Docker calculates your total storage usage -and compares it to your plan's included amount. If applicable, the overage cost -is billed to your account as an overage invoice. - -There are two billing models for paying for additional Docker Hub storage: - -- Pre-pay: Pay in advance for a specified amount of storage. - - > [!NOTE] - > - > Pre-purchased storage expires at the end of your subscription period. - -- Post-pay: Receive an overage invoice for storage usage that exceeds your subscription plan's included amount -at the end of your billing cycle. - -### Storage carryover - -If you pre-pay for storage, your purchased storage is valid for the entire subscription period. You can use it any time during that period, and any unused portion will roll over to the next month until the subscription period ends. - -In the following example, a customer with an annual Business plan pre-pays for 500GB of storage for the year. Their plan includes a base allocation of 500GB of storage per month. -- In January, they use 510 GB-month, exceed their base allocation, and use 10GB from their pre-paid storage. Their remaining pre-paid -storage is 490GB. -- In February, they use 450 GB-month, and do not exceed their base allocation. They do not use any of their pre-paid storage, so it remains at 490GB. -- In March, they use 600 GB-month, exceed their base allocation, and use 100GB from their pre-paid storage. Their remaining pre-paid storage is 390GB. - -| | January | February | March | -|---------------------------------|----------|----------|---------| -| Included GB-month | 500 | 500 | 500 | -| Used storage in month | 510 | 450 | 600 | -| Overage in GB-month | 10 | 0 | 100 | -| Remaining pre-purchased storage | 490 | 490 | 390 | - -At the end of March, the customer has 390GB of pre-purchased storage left to use for the rest of the year. - -## Examples - -### Business plan with pre-pay - -In the following example, a customer with a Business plan has 500GB included in their subscription plan. They pre-pay -for 1700 GB. -- In January, they use 100 GB-month, meaning they did not use any of their pre-pay storage. Their pre-pay storage rolls over to the next month. -- In February, they use 650 GB-month, exceed their base allocation, and use 150GB from their pre-pay storage. -- In March, they use 1800 GB-month, exceed their base allocation, and use 1300GB from their pre-pay storage. -- In April, they use 950 GB-month, exceed their base allocation, and going over their pre-pay storage. This results in an invoice of $14.00 for the storage overage. 
-
-| | January | February | March | April |
-|--------------------------|---------|----------|-------|--------|
-| Included GB-month | 500 | 500 | 500 | 500 |
-| Pre-purchased GB | 1700 | 1700 | 1700 | 1700 |
-| Used storage in month | 100 | 650 | 1800 | 950 |
-| Remaining pre-purchased | 1700 | 1550 | 250 | -200 |
-| Overage invoice | $0.00 | $0.00 | $0.00 | $14.00 |
-
-For information on storage pricing, see the [Docker Pricing](https://www.docker.com/pricing/) page.
-
-### Business plan with post-pay
-
-In the following example, a customer with a Business plan has 500GB included in their subscription plan. They do
-not pre-pay for additional storage consumption.
-- In January, they use 100 GB-month and do not exceed their base allocation.
-- In February, they use 650 GB-month, going over their base allocation by 150 GB-month. They are sent
-an overage invoice for $10.50.
-- In March, they use 1800 GB-month, going over their base allocation by 1300 GB-month. They are sent
-an overage invoice for $91.00.
-- In April, they use 950 GB-month, going over their base allocation by 450 GB-month. They are sent an
-overage invoice for $31.50.
-
-| | January | February | March | April |
-|-----------------------------------|---------|----------|---------|--------|
-| Included GB-month | 500 | 500 | 500 | 500 |
-| Used storage in month | 100 | 650 | 1800 | 950 |
-| Overage in GB-month | 0 | 150 | 1300 | 450 |
-| Overage invoice | $0.00 | $10.50 | $130.00 | $45.00 |
-
-For information on storage pricing, see the [Docker Pricing](https://www.docker.com/pricing/) page.
\ No newline at end of file
diff --git a/content/manuals/billing/faqs.md b/content/manuals/billing/faqs.md
index 623b33e7dff0..455fa879ef12 100644
--- a/content/manuals/billing/faqs.md
+++ b/content/manuals/billing/faqs.md
@@ -16,6 +16,8 @@ weight: 60
 - JCB
 - Diners
 - UnionPay
+- Link
+- ACH transfer with a [verified](/manuals/billing/payment-method.md#verify-a-bank-account) US bank account
 
 ### What currency is supported?
 
@@ -29,15 +31,32 @@ If your subscription payment fails, there is a grace period of 15 days, includin
 - 5 days after the previous attempt
 - 7 days after the previous attempt
 
-Docker also sends an email notification `Action Required - Credit Card Payment Failed` with an attached unpaid invoice after each failed payment attempt. 
+Docker also sends an email notification `Action Required - Credit Card Payment Failed` with an attached unpaid invoice after each failed payment attempt.
 
-Once the grace period is over and the invoice is still not paid, the subscription downgrades to a free plan and all paid features are disabled.
+Once the grace period is over and the invoice is still not paid, the subscription downgrades to a free subscription and all paid features are disabled.
+
+### Can I manually retry a failed payment?
+
+No. Docker retries failed payments on a [retry schedule](/manuals/billing/faqs.md#what-happens-if-my-subscription-payment-fails).
+
+To ensure a retried payment is successful, verify that your default payment
+method is updated. If you need to update your default payment method, see
+[Manage payment method](/manuals/billing/payment-method.md#manage-payment-method).
 
 ### Does Docker collect sales tax and/or VAT?
 
-Starting July 1, 2024, Docker will begin collecting sales tax on subscription fees in compliance with state regulations for customers in the United States. For global customers subject to VAT, the implementation will start rolling out on July 1, 2024.
Note that while the rollout begins on this date, VAT charges may not apply to all applicable subscriptions immediately. +Docker collects sales tax and/or VAT as follows: +
+- For United States customers, Docker began collecting sales tax on July 1, 2024. +- For European customers, Docker began collecting VAT on March 1, 2025. +- For United Kingdom customers, Docker began collecting VAT on May 1, 2025. +
+To ensure that tax assessments are correct, make sure that your billing +information and VAT/Tax ID, if applicable, are updated. See +[Update the billing information](/billing/details/).
-To ensure that tax assessments are correct, make sure that your billing information and VAT/Tax ID, if applicable, are updated. See [Update the billing information](/billing/details/).
+If you're exempt from sales tax, see +[Register a tax certificate](/billing/tax-certificate/).
### How do I certify my tax exempt status?
@@ -49,4 +68,4 @@ Contact the [Docker Sales Team](https://www.docker.com/company/contact).
### Do I need to do anything at the end of my subscription term?
-No. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment. +No. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment. \ No newline at end of file diff --git a/content/manuals/billing/history.md b/content/manuals/billing/history.md index 534c7fb7d8f8..0e64a9c75a61 100644 --- a/content/manuals/billing/history.md +++ b/content/manuals/billing/history.md @@ -3,11 +3,13 @@ title: View billing history weight: 40 description: Discover how to view your billing history in Docker Hub keywords: payments, billing, subscription, invoices, renewals, invoice management, billing administration +aliases: + - /billing/core-billing/history/ ---
In this section, learn how you can view your billing history, manage your invoices, and verify your renewal date. All monthly and annual subscriptions are automatically renewed at the end of the term using the original form of payment.
-{{< include "tax-compliance.md" >}} +{{% include "tax-compliance.md" %}}
## Invoices
@@ -33,13 +35,13 @@ You can’t make changes to a paid or unpaid billing invoice. When you update yo
### View renewal date
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
-You receive your invoice when the subscription renews. To verify your renewal date, sign in to the [Docker Home Billing](https://app.docker.com/billing). Your renewal date and amount are displayed on your subscription plan card. +You receive your invoice when the subscription renews. To verify your renewal date, sign in to [Docker Billing](https://app.docker.com/billing). Your renewal date and amount are displayed on your subscription card.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
You receive your invoice when the subscription renews. To verify your renewal date:
@@ -58,31 +60,44 @@ You receive your invoice when the subscription renews. To verify your renewal da > If the VAT number field is not available, complete the [Contact Support form](https://hub.docker.com/support/contact/). This field may need to be manually added.
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
To add or update your VAT number:
-1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3.
Select **Billing information** from the left-hand menu. -4. Select **Change** on your billing information card. -5. Ensure the **I'm purchasing as a business** checkbox is checked. -6. Enter your VAT number in the Tax ID section. -7. Select **Update**. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Billing information** from the left-hand menu. +1. Select **Change** on your billing information card. +1. Ensure the **I'm purchasing as a business** checkbox is checked. +1. Enter your VAT number in the Tax ID section. + + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, for a + German VAT number, enter `DE123456789`. + +1. Select **Update**.
Your VAT number will be included on your next invoice.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
To add or update your VAT number:
1. Sign in to [Docker Hub](https://hub.docker.com). -2. For user accounts, Select your avatar in the top-right corner, then **Billing**. For organizations, select the name of the organization. -3. Select the **Billing address** link. -4. In the **Billing Information** section, select **Update information**. -5. Enter your VAT number in the Tax ID section. -6. Select **Save**. +1. Select your organization, then select **Billing**. +1. Select the **Billing address** link. +1. In the **Billing Information** section, select **Update information**. +1. Enter your VAT number in the Tax ID section. + + > [!IMPORTANT] + > + > Your VAT number must include your country prefix. For example, for a + German VAT number, enter `DE123456789`. + +1. Select **Save**.
Your VAT number will be included on your next invoice.
@@ -96,28 +111,28 @@ You can view the billing history and download past invoices for a personal accou
### Personal account
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
To view billing history:
-1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Invoices** from the left-hand menu. -4. Optional. Select the **Invoice number** to open invoice details. -5. Optional. Select the **Download** button to download an invoice. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Invoices** from the left-hand menu. +1. Optional. Select the **Invoice number** to open invoice details. +1. Optional. Select the **Download** button to download an invoice.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
To view billing history:
1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. - You can find your past invoices in the **Invoice History** section. +1. Select your organization, then select **Billing**. +1. Select the **Payment methods and billing history** link.
-From here you can download an invoice. +You can find your past invoices in the **Invoice History** section, where +you can download an invoice.
{{< /tab >}} {{< /tabs >}}
@@ -129,28 +144,28 @@ From here you can download an invoice. > You must be an owner of the organization to view the billing history.
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
To view billing history:
-1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Invoices** from the left-hand menu. -4. Optional. Select the **invoice number** to open invoice details. -5. Optional. Select the **download** button to download an invoice. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Invoices** from the left-hand menu. +1. Optional. Select the **invoice number** to open invoice details. +1. Optional. Select the **download** button to download an invoice.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
To view billing history:
1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. - You can find your past invoices in the **Invoice History** section. +1. Select your organization, then select **Billing**. +1. Select the **Payment methods and billing history** link.
-From here you can download an invoice. +You can find your past invoices in the **Invoice History** section, where you +can download an invoice.
{{< /tab >}} {{< /tabs >}} diff --git a/content/manuals/billing/payment-method.md b/content/manuals/billing/payment-method.md index 8c9dc7eb4396..9354051d16e1 100644 --- a/content/manuals/billing/payment-method.md +++ b/content/manuals/billing/payment-method.md @@ -3,8 +3,8 @@ title: Add or update a payment method weight: 20 description: Learn how to add or update a payment method in Docker Hub keywords: payments, billing, subscription, supported payment methods, failed payments, coupons -billing: -- /billing/core-billing/payment-method/ +aliases: + - /billing/core-billing/payment-method/ ---
This page describes how to add or update a payment method for your personal account or for an organization.
@@ -13,53 +13,80 @@ You can add a payment method or update your account's existing payment method at
> [!IMPORTANT] > -> If you want to remove all payment methods, you must first downgrade your subscription to a free plan. See [Downgrade](../subscription/change.md). +> If you want to remove all payment methods, you must first downgrade your subscription to a free subscription. See [Downgrade](../subscription/change.md).
The following payment methods are supported:
-- Visa -- MasterCard -- American Express -- Discover -- JCB -- Diners -- UnionPay +- Cards + - Visa + - MasterCard + - American Express + - Discover + - JCB + - Diners + - UnionPay +- Wallets + - Stripe Link +- Bank accounts + - ACH transfer with a [verified](#verify-a-bank-account) US bank account
All currency, for example the amount listed on your billing invoice, is in United States dollar (USD).
-{{< include "tax-compliance.md" >}} +{{% include "tax-compliance.md" %}}
## Manage payment method
### Personal account
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
To add a payment method:
-1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Select **Payment methods** from the left-hand menu. -4. Select **Add payment method**. -5. Enter your new payment information. -6. Select **Add**. -7. Optional.
You can set a new default payment method by selecting the **Set as default** action. -8. Optional. You can remove non-default payment methods by selecting the **Delete** action. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Payment methods** from the left-hand menu. +1. Select **Add payment method**. +1. Enter your new payment information: + - If you are adding a card: + - Select **Card** and fill out the card information form. + - If you are adding a Link payment: + - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. + - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. + - If you are adding a bank account: + - Select **US bank account**. + - Verify your **Email** and **Full name**. + - If your bank is listed, select your bank's name. + - If your bank is not listed, select **Search for your bank**. + - To verify your bank account, see [Verify a bank account](#verify-a-bank-account). +1. Select **Add payment method**. +1. Optional. You can set a new default payment method by selecting the **Set as default** action. +1. Optional. You can remove non-default payment methods by selecting the **Delete** action. +
+> [!NOTE] +> +> If you want to set a US bank account as your default payment method, you must +> verify the account first.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
To add a payment method:
1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the **Payment methods and billing history** link. -5. In the **Payment method** section, select **Add payment method**. -6. Enter your new payment information, then select **Add**. -7. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. -8. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**. +1. Select **Billing**. +1. Select the **Payment methods** link. +1. Select **Add payment method**. +1. Enter your new payment information: + - If you are adding a card: + - Select **Card** and fill out the card information form. + - If you are adding a Link payment: + - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. + - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. +1. Select **Add**. +1. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. +1. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**.
{{< /tab >}} {{< /tabs >}}
@@ -71,40 +98,102 @@ To add a payment method:
> You must be an organization owner to make changes to the payment information.
{{< tabs >}} -{{< tab name="Docker plan" >}} +{{< tab name="Docker subscription" >}}
To add a payment method:
-1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. Choose your organization from the top-left drop-down. -4. Select **Payment methods** from the left-hand menu. -5. Select **Add payment method**. -6.
Enter your new payment information. -7. Select **Add**. -8. Optional. You can set a new default payment method by selecting the **Set as default** action. -9. Optional. You can remove non-default payment methods by selecting the **Delete** action. +1. Sign in to [Docker Home](https://app.docker.com/) and select your +organization. +1. Select **Billing**. +1. Select **Payment methods** from the left-hand menu. +1. Select **Add payment method**. +1. Enter your new payment information: + - If you are adding a card: + - Select **Card** and fill out the card information form. + - If you are adding a Link payment: + - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. + - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. + - If you are adding a bank account: + - Select **US bank account**. + - Verify your **Email** and **Full name**. + - If your bank is listed, select your bank's name. + - If your bank is not listed, select **Search for your bank**. + - To verify your bank account, see [Verify a bank account](#verify-a-bank-account). +1. Select **Add payment method**. +1. Optional. You can set a new default payment method by selecting the **Set as default** action. +1. Optional. You can remove non-default payment methods by selecting the **Delete** action. +
+> [!NOTE] +> +> If you want to set a US bank account as your default payment method, you must +> verify the account first.
{{< /tab >}} -{{< tab name="Legacy Docker plan" >}} +{{< tab name="Legacy Docker subscription" >}}
To add a payment method:
1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select your avatar in the top-right corner. -3. From the drop-down menu select **Billing**. -4. Select the organization account you want to update. -5. Select the **Payment methods and billing history** link. -6. In the **Payment Method** section, select **Add payment method**. -7. Enter your new payment information, then select **Add**. -8. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. -9. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**. +1. Select your organization, then select **Billing**. +1. Select the **Payment methods** link. +1. Select **Add payment method**. +1. Enter your new payment information: + - If you are adding a card: + - Select **Card** and fill out the card information form. + - If you are adding a Link payment: + - Select **Secure, 1-click checkout with Link** and enter your Link **email address** and **phone number**. + - If you are not an existing Link customer, you must fill out the card information form to store a card for Link payments. +1. Select **Add payment method**. +1. Select the **Actions** icon, then select **Make default** to ensure that your new payment method applies to all purchases and subscriptions. +1. Optional. You can remove non-default payment methods by selecting the **Actions** icon. Then, select **Delete**.
{{< /tab >}} {{< /tabs >}}
+## Verify a bank account +
+There are two ways to verify a bank account as a payment method: +
+- Instant verification: Docker supports several major banks for instant verification. +- Manual verification: All other banks must be verified manually.
+ +### Instant verification +
+To verify your bank account instantly, you must sign in to your bank account +from the Docker billing flow: +
+1. Choose **US bank account** as your payment method. +1. Verify your **Email** and **Full name**. +1. If your bank is listed, select your bank's name or select **Search for your bank**. +1. Sign in to your bank and review the terms and conditions. This agreement +allows Docker to debit payments from your connected bank account. +1. Select **Agree and continue**. +1. Select an account to link and verify, and select **Connect account**. +
+When the account is verified, you see a success message in the pop-up modal. +
+### Manual verification +
+To verify your bank account manually, you must enter the micro-deposit amount from your bank statement: +
+1. Choose **US bank account** as your payment method. +1. Verify your **Email** and **First and last name**. +1. Select **Enter bank details manually instead**. +1. Enter your bank details: **Routing number** and **Account number**. +1. Select **Submit**. +1. You receive an email with instructions on how to verify manually. +
+Manual verification uses micro-deposits. You should see a small deposit +(for example, $0.01) in your bank account in 1-2 business days. Open your manual verification email and enter the amount of this deposit to verify your account. + ## Failed payments
+> [!NOTE] +> +> You can't manually retry a failed payment. Docker retries failed payments +based on the retry schedule. + If your subscription payment fails, there is a grace period of 15 days, including the due date. Docker retries to collect the payment 3 times using the following schedule:
- 3 days after the due date
-{{< include "tax-compliance.md" >}} +{{% include "tax-compliance.md" %}} ## Prerequisites diff --git a/content/manuals/build-cloud/_index.md b/content/manuals/build-cloud/_index.md index a9072b04a963..6cb41a0b53da 100644 --- a/content/manuals/build-cloud/_index.md +++ b/content/manuals/build-cloud/_index.md @@ -69,4 +69,4 @@ Once you've signed up and created a builder, continue by [setting up the builder in your local environment](./setup.md). For information about roles and permissions related to Docker Build Cloud, see -[Roles and Permissions](/manuals/security/for-admins/roles-and-permissions.md#docker-build-cloud). +[Roles and Permissions](/manuals/enterprise/security/roles-and-permissions.md#docker-build-cloud-permissions). diff --git a/content/manuals/build-cloud/builder-settings.md b/content/manuals/build-cloud/builder-settings.md new file mode 100644 index 000000000000..f5e91e9c50e0 --- /dev/null +++ b/content/manuals/build-cloud/builder-settings.md @@ -0,0 +1,90 @@ +--- +title: Builder settings +description: Set your builder settings relating to private registries, disk allocation . +keywords: build, cloud build, optimize, remote, local, cloud, registry, package repository, vpn +--- + +The **Builder settings** page in Docker Build Cloud lets you configure disk allocation, private resource access, and firewall settings for your cloud builders in your organization. These configurations help optimize storage, enable access to private registries, and secure outbound network traffic. + +## Storage and cache management + +### Disk allocation + +The **Disk allocation** setting lets you control how much of the available +storage is dedicated to the build cache. A lower allocation increases +storage available for active builds. + +To make disk allocation changes, navigate to **Builder settings** in Docker +Build Cloud and then adjust the **Disk allocation** slider to specify the +percentage of storage used for build caching. + +Any changes take effect immediately. + +### Build cache space + +Your subscription includes the following Build cache space: + +| Subscription | Build cache space | +|--------------|-------------------| +| Personal | N/A | +| Pro | 50GB | +| Team | 100GB | +| Business | 200GB | + +### Multi-architecture storage allocation + +Docker Build Cloud automatically provisions builders for both amd64 and arm64 architectures. Your total build cache space is split equally between these +two builders: + +- Pro (50GB total): 25GB for amd64 builder + 25GB for arm64 builder +- Team (100GB total): 50GB for amd64 builder + 50GB for arm64 builder +- Business (200GB total): 100GB for amd64 builder + 100GB for arm64 builder + +> [!IMPORTANT] +> +> If you only build for one architecture, be aware that your effective cache +space is half of your subscription's total allocation. + +### Get more build cache space + +To get more Build cache space, [upgrade your subscription](/manuals/subscription/scale.md). + +> [!TIP] +> +> If you build large images, consider allocating less storage for caching to +leave more space for active builds. + +## Private resource access + +Private resource access lets cloud builders pull images and packages from private resources. This feature is useful when builds rely on self-hosted artifact repositories or private OCI registries. 
+ +For example, if your organization hosts a private [PyPI](https://pypi.org/) repository on a private network, Docker Build Cloud would not be able to access it by default, since the cloud builder is not connected to your private network. +
+To enable your cloud builders to access your private resources, enter the host name and port of your private resource and then select **Add**. +
+### Authentication +
+If your internal artifacts require authentication, make sure that you +authenticate with the repository either before or during the build. For +internal package repositories for npm or PyPI, use [build secrets](/manuals/build/building/secrets.md) +to authenticate during the build. For internal OCI registries, use `docker +login` to authenticate before building. +
+Note that if you use a private registry that requires authentication, you +need to run `docker login` twice before building: once to authenticate with +Docker so that you can use the cloud builder, and again to authenticate with +the private registry. +
+```console +$ echo $DOCKER_PAT | docker login docker.io -u <your-docker-username> --password-stdin +$ echo $REGISTRY_PASSWORD | docker login registry.example.com -u <your-registry-username> --password-stdin +$ docker build --builder <cloud-builder-name> --tag registry.example.com/<image> --push . +``` +
+## Firewall +
+Firewall settings let you restrict cloud builder egress traffic to specific IP addresses. This helps enhance security by limiting external network egress from the builder. +
+1. Select the **Enable firewall: Restrict cloud builder egress to specific public IP address** checkbox. +2. Enter the IP address you want to allow. +3. Select **Add** to apply the restriction. diff --git a/content/manuals/build-cloud/ci.md b/content/manuals/build-cloud/ci.md index bd66ec53f422..49477182d55a 100644 --- a/content/manuals/build-cloud/ci.md +++ b/content/manuals/build-cloud/ci.md @@ -29,29 +29,53 @@ See [Loading build results](./usage/#loading-build-results) for details.
> [!NOTE] > -> Builds on Docker Build Cloud have a timeout limit of two hours. Builds that -> run for longer than two hours are automatically cancelled. +> Builds on Docker Build Cloud have a timeout limit of 90 minutes. Builds that +> run for longer than 90 minutes are automatically cancelled.
-{{< tabs >}} -{{< tab name="GitHub Actions" >}} +## Setting up credentials for CI/CD +
+To enable your CI/CD system to build and push images using Docker Build Cloud, provide both an access token and a username. The type of token and the username you use depend on your account type and permissions. +
+- If you are an organization administrator or have permission to create [organization access tokens (OAT)](/manuals/enterprise/security/access-tokens.md), use an OAT and set `DOCKER_ACCOUNT` to your Docker Hub organization name. +- If you do not have permission to create OATs or are using a personal account, use a [personal access token (PAT)](/security/access-tokens/) and set `DOCKER_ACCOUNT` to your Docker Hub username. +
+### Creating access tokens +
+#### For organization accounts +
+If you are an organization administrator: +
+- Create an [organization access token (OAT)](/manuals/enterprise/security/access-tokens.md). The token must have these permissions: + 1. **cloud-connect** scope + 2. **Read public repositories** permission + 3. **Repository access** with **Image push** permission for the target repository: + - Expand the **Repository** drop-down. + - Select **Add repository** and choose your target repository.
+ - Set the **Image push** permission for the repository. +
+If you are not an organization administrator: +
+- Ask your organization administrator for an access token with the permissions listed above, or use a personal access token. +
+#### For personal accounts +
+- Create a [personal access token (PAT)](/security/access-tokens/) with the following permissions: + - **Read & write** access. + - Note: Building with Docker Build Cloud only requires read access, but you need write access to push images to a Docker Hub repository. + + +## CI platform examples
> [!NOTE] > -> Version 4.0.0 and later of `docker/build-push-action` and -> `docker/bake-action` builds images with [provenance attestations by -> default](/manuals/build/ci/github-actions/attestations.md#default-provenance). Docker -> Build Cloud automatically attempts to load images to the local image store if -> you don't explicitly push them to a registry. +> In your CI/CD configuration, set the following variables and secrets: +> - `DOCKER_ACCESS_TOKEN` — your access token (PAT or OAT). Use a secret to store the token. +> - `DOCKER_ACCOUNT` — your Docker Hub organization name (for OAT) or username (for PAT) +> - `CLOUD_BUILDER_NAME` — the name of the cloud builder you created in the [Docker Build Cloud Dashboard](https://app.docker.com/build/) > -> This results in a conflicting scenario where if you build a tagged image -> without pushing it to a registry, Docker Build Cloud attempts to load images -> containing attestations. But the local image store on the GitHub runner -> doesn't support attestations, and the image load fails as a result. -> -> If you want to load images built with `docker/build-push-action` together -> with Docker Build Cloud, you must disable provenance attestations by setting -> `provenance: false` in the GitHub Action inputs (or in `docker-bake.hcl` if -> you use Bake). +> This ensures your builds authenticate correctly with Docker Build Cloud. +
+### GitHub Actions
```yaml name: ci @@ -68,27 +92,26 @@ jobs: - name: Login to Docker Hub uses: docker/login-action@v3 with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_PAT }} + username: ${{ vars.DOCKER_ACCOUNT }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: driver: cloud - endpoint: "/default" + endpoint: "${{ vars.DOCKER_ACCOUNT }}/${{ vars.CLOUD_BUILDER_NAME }}" # for example, "acme/default" install: true - name: Build and push uses: docker/build-push-action@v6 with: - tags: "" + tags: "<IMAGE>" # for example, "acme/my-image:latest" # For pull requests, export results to the build cache. # Otherwise, push to a registry.
outputs: ${{ github.event_name == 'pull_request' && 'type=cacheonly' || 'type=registry' }} ```
-{{< /tab >}} -{{< tab name="GitLab" >}} +### GitLab
```yaml default: @@ -97,7 +120,7 @@ default: - docker:24-dind before_script: - docker info - - echo "$DOCKER_PAT" | docker login --username "$DOCKER_USER" --password-stdin + - echo "$DOCKER_ACCESS_TOKEN" | docker login --username "$DOCKER_ACCOUNT" --password-stdin - | apk add curl jq ARCH=${CI_RUNNER_EXECUTABLE_ARCH#*/} @@ -105,11 +128,12 @@ default: mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - - docker buildx create --use --driver cloud ${DOCKER_ORG}/default + - docker buildx create --use --driver cloud ${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}
variables: IMAGE_NAME: <IMAGE> - DOCKER_ORG: + DOCKER_ACCOUNT: # your Docker Hub organization name (or username when using a personal account) + CLOUD_BUILDER_NAME: # the name of the cloud builder you created in the Docker Build Cloud Dashboard (https://app.docker.com/build/)
# Build multi-platform image and push to a registry build_push: @@ -133,8 +157,7 @@ build_cache: . ```
-{{< /tab >}} -{{< tab name="Circle CI" >}} +### Circle CI
```yaml version: 2.1 @@ -154,8 +177,8 @@ jobs: curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx
- - run: echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - run: docker buildx create --use --driver cloud "/default" + - run: echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin + - run: docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
- run: | docker buildx build \ @@ -177,8 +200,8 @@ jobs: curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx
- - run: echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - run: docker buildx create --use --driver cloud "/default" + - run: echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin + - run: docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
- run: | docker buildx build \ @@ -195,8 +218,7 @@ workflows: - build_push ```
-{{< /tab >}} -{{< tab name="Buildkite" >}} +### Buildkite
The following example sets up a Buildkite pipeline using Docker Build Cloud.
The example assumes that the pipeline name is `build-push-docker` and that you @@ -210,7 +232,7 @@ Add the following `environment` hook agent's hook directory:
set -euo pipefail
if [[ "$BUILDKITE_PIPELINE_NAME" == "build-push-docker" ]]; then - export DOCKER_PAT="" + export DOCKER_ACCESS_TOKEN="<your-access-token>" fi ```
@@ -218,7 +240,8 @@ Create a `pipeline.yml` that uses the `docker-login` plugin:
```yaml env: - DOCKER_ORG: + DOCKER_ACCOUNT: # your Docker Hub organization name (or username when using a personal account) + CLOUD_BUILDER_NAME: # the name of the cloud builder you created in the Docker Build Cloud Dashboard (https://app.docker.com/build/) IMAGE_NAME: <IMAGE>
steps: @@ -226,8 +249,8 @@ steps: key: build-push plugins: - docker-login#v2.1.0: - username: - password-env: DOCKER_PAT # the variable name in the environment hook + username: DOCKER_ACCOUNT + password-env: DOCKER_ACCESS_TOKEN # the variable name in the environment hook ```
Create the `build.sh` script:
@@ -256,7 +279,7 @@ curl --silent -L --output $DOCKER_DIR/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx
# Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "$DOCKER_ORG/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
# Cache-only image build docker buildx build \ @@ -273,8 +296,7 @@ docker buildx build \ . ```
-{{< /tab >}} -{{< tab name="Jenkins" >}} +### Jenkins
```groovy pipeline { @@ -282,9 +304,9 @@ pipeline {
environment { ARCH = 'amd64' - DOCKER_PAT = credentials('docker-personal-access-token') - DOCKER_USER = credentials('docker-username') - DOCKER_ORG = '' + DOCKER_ACCESS_TOKEN = credentials('docker-access-token') + DOCKER_ACCOUNT = credentials('docker-account') + CLOUD_BUILDER_NAME = '<BUILDER_NAME>' IMAGE_NAME = '<IMAGE>' }
@@ -297,8 +319,8 @@ pipeline { sh 'mkdir -vp ~/.docker/cli-plugins/' sh 'curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL' sh 'chmod a+x ~/.docker/cli-plugins/docker-buildx' - sh 'echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin' - sh 'docker buildx create --use --driver cloud "$DOCKER_ORG/default"' + sh 'echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin' + sh 'docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"' // Cache-only build sh 'docker buildx build --platform linux/amd64,linux/arm64 --tag "$IMAGE_NAME" --output type=cacheonly .' // Build and push a multi-platform image @@ -309,8 +331,7 @@ } }
-{{< /tab >}} -{{< tab name="Travis CI" >}} +### Travis CI
```yaml language: minimal @@ -321,10 +342,10 @@ services:
env: global: - - IMAGE_NAME=username/repo + - IMAGE_NAME=<IMAGE> # for example, "acme/my-image:latest"
before_install: | - echo "$DOCKER_PAT" | docker login --username "$DOCKER_USER" --password-stdin + echo "$DOCKER_ACCESS_TOKEN" | docker login --username "$DOCKER_ACCOUNT" --password-stdin
install: | set -e @@ -332,7 +353,7 @@ install: | mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx - docker buildx create --use --driver cloud "/default" + docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
script: | docker buildx build \ @@ -341,13 +362,11 @@ script: | --tag "$IMAGE_NAME" .
```
-{{< /tab >}} -{{< tab name="BitBucket Pipelines" >}} +### BitBucket Pipelines
```yaml -# Prerequisites: $DOCKER_USER, $DOCKER_PAT setup as deployment variables +# Prerequisites: $DOCKER_ACCOUNT, $CLOUD_BUILDER_NAME, $DOCKER_ACCESS_TOKEN set up as deployment variables # This pipeline assumes $BITBUCKET_REPO_SLUG as the image name -# Replace in the `docker buildx create` command with your Docker org
image: atlassian/default-image:3
pipelines: default: @@ -361,8 +380,8 @@ pipelines: - BUILDX_URL=$(curl -s https://raw.githubusercontent.com/docker/actions-toolkit/main/.github/buildx-lab-releases.json | jq -r ".latest.assets[] | select(endswith(\"linux-$ARCH\"))") - curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL - chmod a+x ~/.docker/cli-plugins/docker-buildx - - echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin - - docker buildx create --use --driver cloud "/default" + - echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin + - docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}" - IMAGE_NAME=$BITBUCKET_REPO_SLUG - docker buildx build --platform linux/amd64,linux/arm64 @@ -372,8 +391,7 @@ - docker ```
-{{< /tab >}} -{{< tab name="Shell" >}} +### Shell script
```bash #!/bin/bash @@ -387,11 +405,11 @@ mkdir -vp ~/.docker/cli-plugins/ curl --silent -L --output ~/.docker/cli-plugins/docker-buildx $BUILDX_URL chmod a+x ~/.docker/cli-plugins/docker-buildx
-# Login to Docker Hub. For security reasons $DOCKER_PAT should be a Personal Access Token. See https://docs.docker.com/security/for-developers/access-tokens/ -echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin +# Login to Docker Hub with an access token. See https://docs.docker.com/build-cloud/ci/#creating-access-tokens +echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin
# Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
# Cache-only image build docker buildx build \ @@ -407,8 +425,7 @@ docker buildx build \ . ```
-{{< /tab >}} -{{< tab name="Docker Compose" >}} +### Docker Compose
Use this implementation if you want to use `docker compose build` with Docker Build Cloud in CI.
@@ -433,15 +450,12 @@ curl --silent -L --output ~/.docker/cli-plugins/docker-compose $COMPOSE_URL chmod a+x ~/.docker/cli-plugins/docker-buildx chmod a+x ~/.docker/cli-plugins/docker-compose
-# Login to Docker Hub. For security reasons $DOCKER_PAT should be a Personal Access Token. See https://docs.docker.com/security/for-developers/access-tokens/ -echo "$DOCKER_PAT" | docker login --username $DOCKER_USER --password-stdin +# Login to Docker Hub with an access token.
See https://docs.docker.com/build-cloud/ci/#creating-access-tokens +echo "$DOCKER_ACCESS_TOKEN" | docker login --username $DOCKER_ACCOUNT --password-stdin
# Connect to your builder and set it as the default builder -docker buildx create --use --driver cloud "/default" +docker buildx create --use --driver cloud "${DOCKER_ACCOUNT}/${CLOUD_BUILDER_NAME}"
# Build the image build docker compose build ``` - -{{< /tab >}} -{{< /tabs >}} diff --git a/content/manuals/build-cloud/release-notes.md b/content/manuals/build-cloud/release-notes.md new file mode 100644 index 000000000000..d37174d8fdf2 --- /dev/null +++ b/content/manuals/build-cloud/release-notes.md @@ -0,0 +1,16 @@ +--- +description: Learn about the latest features of Docker Build Cloud +keywords: docker build cloud, release notes, changelog, features, changes, delta, new, releases +title: Docker Build Cloud release notes +linkTitle: Release notes +tags: [Release notes] +--- +
+This page contains information about the new features, improvements, known +issues, and bug fixes in Docker Build Cloud releases. +
+## 2025-02-24 +
+### New +
+Added a new **Builder settings** page where you can configure disk allocation, private resource access, and firewall settings for your cloud builders in your organization. These configurations help optimize storage, enable access to private registries, and secure outbound network traffic. \ No newline at end of file diff --git a/content/manuals/build-cloud/setup.md b/content/manuals/build-cloud/setup.md index 57c2e366314d..56f78180c42d 100644 --- a/content/manuals/build-cloud/setup.md +++ b/content/manuals/build-cloud/setup.md @@ -16,7 +16,8 @@ environment.
To get started with Docker Build Cloud, you need to:
- Download and install Docker Desktop version 4.26.0 or later. -- Sign up for a Docker Build Cloud subscription in the [Docker Build Cloud Dashboard](https://app.docker.com/build/). +- Create a cloud builder on the [Docker Build Cloud Dashboard](https://app.docker.com/build/). + - When you create the builder, choose a name for it (for example, `default`). You will use this name as `BUILDER_NAME` in the CLI steps below.
### Use Docker Build Cloud without Docker Desktop
@@ -50,9 +51,17 @@ command, or using the Docker Desktop settings GUI.
$ docker buildx create --driver cloud <ORG>/<BUILDER_NAME> ```
- Replace `ORG` with the Docker Hub namespace of your Docker organization. + Replace `<ORG>` with the Docker Hub namespace of your Docker organization (or your username if you are using a personal account), and `<BUILDER_NAME>` with the name you chose when creating the builder in the dashboard. + + This creates a local instance of the cloud builder named `cloud-ORG-BUILDER_NAME`. + + > [!NOTE] + > + > If your organization is `acme` and you named your builder `default`, use: + > ```console + > $ docker buildx create --driver cloud acme/default + > ```
-This creates a builder named `cloud-ORG-BUILDER_NAME`. {{< /tab >}} {{< tab name="Docker Desktop" >}} diff --git a/content/manuals/build-cloud/usage.md b/content/manuals/build-cloud/usage.md index 83bdde6ed037..891bb7df9cf2 100644 --- a/content/manuals/build-cloud/usage.md +++ b/content/manuals/build-cloud/usage.md @@ -186,13 +186,5 @@ It only changes the builder that's automatically selected to run your builds.
## Registries on internal networks
-It isn't possible to use Docker Build Cloud with a private registry -or registry mirror on an internal network behind a VPN. -All endpoints that a cloud builder interacts with, -including OCI registries, must be accessible over the internet.
- -> **Interested in trying out an experimental feature?** -> ->We are currently testing an experimental feature which lets cloud builders access internal resources. -> -> If you're interested in trying this feature, contact us using the [Support form](https://hub.docker.com/support/contact?topic=Docker+Build+Cloud&subject=Private+registry+access). +It is possible to use Docker Build Cloud with a [private registry](/manuals/build-cloud/builder-settings.md#private-resource-access) +or registry mirror on an internal network. diff --git a/content/manuals/build/bake/_index.md b/content/manuals/build/bake/_index.md index 5c9b5c15ab8c..ee9e59453cdd 100644 --- a/content/manuals/build/bake/_index.md +++ b/content/manuals/build/bake/_index.md @@ -6,8 +6,6 @@ aliases: - /build/customize/bake/ --- -{{< summary-bar feature_name="Build bake" >}} - Bake is a feature of Docker Buildx that lets you define your build configuration using a declarative file, as opposed to specifying a complex CLI expression. It also lets you run multiple builds concurrently with a single invocation. diff --git a/content/manuals/build/bake/compose-file.md b/content/manuals/build/bake/compose-file.md index 83244c47162b..e142133828c9 100644 --- a/content/manuals/build/bake/compose-file.md +++ b/content/manuals/build/bake/compose-file.md @@ -10,7 +10,7 @@ Bake supports the [Compose file format](/reference/compose-file/_index.md) to parse a Compose file and translate each service to a [target](reference.md#target). ```yaml -# docker-compose.yml +# compose.yaml services: webapp-dev: build: &build-dev @@ -57,15 +57,35 @@ $ docker buildx bake --print "context": ".", "dockerfile": "Dockerfile.webapp", "tags": ["docker.io/username/webapp:latest"], - "cache-from": ["docker.io/username/webapp:cache"], - "cache-to": ["docker.io/username/webapp:cache"] + "cache-from": [ + { + "ref": "docker.io/username/webapp:cache", + "type": "registry" + } + ], + "cache-to": [ + { + "ref": "docker.io/username/webapp:cache", + "type": "registry" + } + ] }, "webapp-release": { "context": ".", "dockerfile": "Dockerfile.webapp", "tags": ["docker.io/username/webapp:latest"], - "cache-from": ["docker.io/username/webapp:cache"], - "cache-to": ["docker.io/username/webapp:cache"], + "cache-from": [ + { + "ref": "docker.io/username/webapp:cache", + "type": "registry" + } + ], + "cache-to": [ + { + "ref": "docker.io/username/webapp:cache", + "type": "registry" + } + ], "platforms": ["linux/amd64", "linux/arm64"] } } @@ -86,7 +106,7 @@ where the command is executed and applied to compose definitions passed with `-f`. 
```yaml -# docker-compose.yml +# compose.yaml services: webapp: image: docker.io/username/webapp:${TAG:-v1.0.0} @@ -132,7 +152,7 @@ the [special extension](/reference/compose-file/extension.md) field `x-bake` in your compose file to evaluate extra fields: ```yaml -# docker-compose.yml +# compose.yaml services: addon: image: ct-addon:bar @@ -180,7 +200,7 @@ $ docker buildx bake --print { "group": { "default": { - "targets": ["aws", "addon"] + "targets": ["addon", "aws"] } }, "target": { @@ -192,8 +212,22 @@ $ docker buildx bake --print "CT_TAG": "bar" }, "tags": ["ct-addon:foo", "ct-addon:alp"], - "cache-from": ["user/app:cache", "type=local,src=path/to/cache"], - "cache-to": ["type=local,dest=path/to/cache"], + "cache-from": [ + { + "ref": "user/app:cache", + "type": "registry" + }, + { + "src": "path/to/cache", + "type": "local" + } + ], + "cache-to": [ + { + "dest": "path/to/cache", + "type": "local" + } + ], "platforms": ["linux/amd64", "linux/arm64"], "pull": true }, @@ -205,9 +239,22 @@ $ docker buildx bake --print "CT_TAG": "bar" }, "tags": ["ct-fake-aws:bar"], - "secret": ["id=mysecret,src=./secret", "id=mysecret2,src=./secret2"], + "secret": [ + { + "id": "mysecret", + "src": "./secret" + }, + { + "id": "mysecret2", + "src": "./secret2" + } + ], "platforms": ["linux/arm64"], - "output": ["type=docker"], + "output": [ + { + "type": "docker" + } + ], "no-cache": true } } diff --git a/content/manuals/build/bake/expressions.md b/content/manuals/build/bake/expressions.md index 71e7ef7de72e..05620f0866cc 100644 --- a/content/manuals/build/bake/expressions.md +++ b/content/manuals/build/bake/expressions.md @@ -62,7 +62,7 @@ target "default" { dockerfile="Dockerfile" tags = [ "my-image:latest", - notequal("",TAG) ? "my-image:${TAG}": "", + notequal("",TAG) ? "my-image:${TAG}": "" ] } ``` diff --git a/content/manuals/build/bake/inheritance.md b/content/manuals/build/bake/inheritance.md index 6f4328579d1c..0d93cc7a579f 100644 --- a/content/manuals/build/bake/inheritance.md +++ b/content/manuals/build/bake/inheritance.md @@ -59,7 +59,7 @@ attributes: target "lint" { inherits = ["_common"] dockerfile = "./dockerfiles/lint.Dockerfile" - output = ["type=cacheonly"] + output = [{ type = "cacheonly" }] } target "docs" { diff --git a/content/manuals/build/bake/introduction.md b/content/manuals/build/bake/introduction.md index 5265a26e3878..fcc035a9a8fa 100644 --- a/content/manuals/build/bake/introduction.md +++ b/content/manuals/build/bake/introduction.md @@ -84,6 +84,11 @@ target "myapp" { } ``` +> [!TIP] +> +> Want a better editing experience for Bake files in VS Code? +> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. + ## Next steps To learn more about using Bake, see the following topics: diff --git a/content/manuals/build/bake/overrides.md b/content/manuals/build/bake/overrides.md index c3b9f501a126..ff051976a697 100644 --- a/content/manuals/build/bake/overrides.md +++ b/content/manuals/build/bake/overrides.md @@ -55,15 +55,15 @@ If you don't specify any files, Bake will use the following lookup order: 3. `docker-compose.yml` 4. `docker-compose.yaml` 5. `docker-bake.json` -6. `docker-bake.override.json` -7. `docker-bake.hcl` +6. `docker-bake.hcl` +7. `docker-bake.override.json` 8. `docker-bake.override.hcl` If more than one Bake file is found, all files are loaded and merged into a single definition. Files are merged according to the lookup order. 
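As a hypothetical illustration of that merge order (the file contents below are invented for this sketch; both file names come from the default lookup order above), an attribute set for a target in `docker-bake.hcl` is overridden by the same attribute in `docker-bake.override.hcl`, because the override file is loaded later:

```console
$ cat docker-bake.hcl
target "app" {
  tags = ["app:dev"]
}
$ cat docker-bake.override.hcl
target "app" {
  tags = ["app:prod"]
}
$ docker buildx bake app --print
```

With both files present, the merged definition printed for `app` should show `tags` as `["app:prod"]`.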
```console -$ docker buildx bake bake --print +$ docker buildx bake --print [+] Building 0.0s (1/1) FINISHED => [internal] load local bake definitions 0.0s => => reading compose.yaml 45B / 45B 0.0s diff --git a/content/manuals/build/bake/remote-definition.md b/content/manuals/build/bake/remote-definition.md index bffe599c0857..15b1769f4acc 100644 --- a/content/manuals/build/bake/remote-definition.md +++ b/content/manuals/build/bake/remote-definition.md @@ -157,7 +157,9 @@ docker buildx bake -f bake.hcl -f cwd://local.hcl "https://github.com/crazy-max/ }, "target": "build", "output": [ - "type=cacheonly" + { + "type": "cacheonly" + } ] } } diff --git a/content/manuals/build/bake/targets.md b/content/manuals/build/bake/targets.md index 29c6a1d376b6..183eb939e92f 100644 --- a/content/manuals/build/bake/targets.md +++ b/content/manuals/build/bake/targets.md @@ -81,8 +81,8 @@ target "api" { target "tests" { dockerfile = "tests.Dockerfile" contexts = { - webapp = "target:webapp", - api = "target:api", + webapp = "target:webapp" + api = "target:api" } output = ["type=local,dest=build/tests"] context = "." diff --git a/content/manuals/build/builders/_index.md b/content/manuals/build/builders/_index.md index c926822df0e2..ccb8d9a6202e 100644 --- a/content/manuals/build/builders/_index.md +++ b/content/manuals/build/builders/_index.md @@ -69,11 +69,11 @@ selected when you invoke builds. Even though `docker build` is an alias for `docker buildx build`, there are subtle differences between the two commands. With Buildx, the build client and -the and daemon (BuildKit) are decoupled. This means you can use multiple +the daemon (BuildKit) are decoupled. This means you can use multiple builders from a single client, even remote ones. The `docker build` command always defaults to using the default builder that -comes bundled with the Docker Engine, for ensuring backwards compatibility with +comes bundled with the Docker Engine, to ensure backwards compatibility with older versions of the Docker CLI. The `docker buildx build` command, on the other hand, checks whether you've set a different builder as the default builder before it sends your build to BuildKit. diff --git a/content/manuals/build/builders/drivers/_index.md b/content/manuals/build/builders/drivers/_index.md index a75e455de620..de4f772c6411 100644 --- a/content/manuals/build/builders/drivers/_index.md +++ b/content/manuals/build/builders/drivers/_index.md @@ -10,7 +10,7 @@ aliases: --- Build drivers are configurations for how and where the BuildKit backend runs. -Driver settings are customizable and allows fine-grained control of the builder. +Driver settings are customizable and allow fine-grained control of the builder. Buildx supports the following drivers: - `docker`: uses the BuildKit library bundled into the Docker daemon. @@ -66,7 +66,7 @@ To build an image using a non-default driver and load it to the image store, ### Load by default -{{< introduced buildx 0.14.0 >}} +{{< summary-bar feature_name="Load by default" >}} You can configure the custom build drivers to behave in a similar way to the default `docker` driver, and load images to the local image store by default. 
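For example, here is a minimal sketch of turning that behavior on when creating a builder with the `docker-container` driver (the builder name `mybuilder` is arbitrary; `default-load` is the driver option that enables loading to the image store by default):

```console
$ docker buildx create \
  --name mybuilder \
  --driver docker-container \
  --driver-opt default-load=true
```

Builds that use this builder then load their results into the local image store without an explicit `--load` flag.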
diff --git a/content/manuals/build/builders/drivers/docker-container.md b/content/manuals/build/builders/drivers/docker-container.md index a71b9de1b4dc..025db83eba5d 100644 --- a/content/manuals/build/builders/drivers/docker-container.md +++ b/content/manuals/build/builders/drivers/docker-container.md @@ -1,5 +1,5 @@ --- -title: Docker container build driver +title: Docker container driver description: The Docker container driver runs BuildKit in a container image. keywords: build, buildx, driver, builder, docker-container aliases: diff --git a/content/manuals/build/builders/drivers/kubernetes.md b/content/manuals/build/builders/drivers/kubernetes.md index f15f83f9ebe0..115bb73cfd4a 100644 --- a/content/manuals/build/builders/drivers/kubernetes.md +++ b/content/manuals/build/builders/drivers/kubernetes.md @@ -246,7 +246,7 @@ that you want to support. ## Rootless mode The Kubernetes driver supports rootless mode. For more information on how -rootless mode works, and it's requirements, see +rootless mode works, and its requirements, see [here](https://github.com/moby/buildkit/blob/master/docs/rootless.md). To turn it on in your cluster, you can use the `rootless=true` driver option: diff --git a/content/manuals/build/building/base-images.md b/content/manuals/build/building/base-images.md index 32ae78d8c469..0c057046454b 100644 --- a/content/manuals/build/building/base-images.md +++ b/content/manuals/build/building/base-images.md @@ -22,9 +22,8 @@ For most cases, you don't need to create your own base image. Docker Hub contains a vast library of Docker images that are suitable for use as a base image in your build. [Docker Official Images](../../docker-hub/image-library/trusted-content.md#docker-official-images) -are specifically designed as a set of hardened, battle-tested images that -support a wide variety of platforms, languages, and frameworks. There are also -[Docker Verified +have clear documentation, promote best practices, and are regularly updated. +There are also [Docker Verified Publisher](../../docker-hub/image-library/trusted-content.md#verified-publisher-images) images, created by trusted publishing partners, verified by Docker. @@ -78,7 +77,7 @@ To run your new image, use the `docker run` command: $ docker run --rm hello ``` -This example image can only successfully execute as long as the `hello` binary +This example image can only be successfully executed as long as the `hello` binary doesn't have any runtime dependencies. Computer programs tend to depend on certain other programs or resources to exist in the runtime environment. For example: @@ -103,17 +102,17 @@ which you can also use to build Ubuntu images. For example, to create an Ubuntu base image: ```dockerfile -$ sudo debootstrap focal focal > /dev/null -$ sudo tar -C focal -c . | docker import - focal +$ sudo debootstrap noble noble > /dev/null +$ sudo tar -C noble -c . 
| docker import - noble
sha256:81ec9a55a92a5618161f68ae691d092bf14d700129093158297b3d01593f4ee3
-$ docker run focal cat /etc/lsb-release +$ docker run noble cat /etc/lsb-release DISTRIB_ID=Ubuntu -DISTRIB_RELEASE=20.04 -DISTRIB_CODENAME=focal -DISTRIB_DESCRIPTION="Ubuntu 20.04 LTS" +DISTRIB_RELEASE=24.04 +DISTRIB_CODENAME=noble +DISTRIB_DESCRIPTION="Ubuntu 24.04.2 LTS" ```
There are more example scripts for creating base images in diff --git a/content/manuals/build/building/best-practices.md b/content/manuals/build/building/best-practices.md index 343c42b62c54..fc4f0ae92655 100644 --- a/content/manuals/build/building/best-practices.md +++ b/content/manuals/build/building/best-practices.md @@ -46,9 +46,9 @@ image. When choosing an image, ensure it's built from a trusted source and keep it small.
- [Docker Official Images](https://hub.docker.com/search?image_filter=official) - are some of the most secure and dependable images on Docker Hub. Typically, - Docker Official images have few or no packages containing CVEs, and are - thoroughly reviewed by Docker and project maintainers. + are a curated collection of images that have clear documentation, promote + best practices, and are regularly updated. They provide a trusted starting + point for many applications.
- [Verified Publisher](https://hub.docker.com/search?image_filter=store) images are high-quality images published and maintained by the organizations @@ -147,7 +147,7 @@ Limiting each container to one process is a good rule of thumb, but it's not a hard and fast rule. For example, not only can containers be [spawned with an init process](/manuals/engine/containers/multi-service_container.md), some programs might spawn additional processes of their own accord. For -instance, [Celery](https://docs.celeryproject.org/) can spawn multiple worker +instance, [Celery](https://docs.celeryq.dev/) can spawn multiple worker processes, and [Apache](https://httpd.apache.org/) can create one process per request.
@@ -192,17 +192,17 @@ image. This is useful because it lets publishers update tags to point to newer versions of an image. And as an image consumer, it means you automatically get the new version when you re-build your image.
-For example, if you specify `FROM alpine:3.19` in your Dockerfile, `3.19` -resolves to the latest patch version for `3.19`. +For example, if you specify `FROM alpine:3.21` in your Dockerfile, `3.21` +resolves to the latest patch version for `3.21`.
```dockerfile # syntax=docker/dockerfile:1 -FROM alpine:3.19 +FROM alpine:3.21 ```
-At one point in time, the `3.19` tag might point to version 3.19.1 of the +At one point in time, the `3.21` tag might point to version 3.21.1 of the image. If you rebuild the image 3 months later, the same tag might point to a -different version, such as 3.19.4. This publishing workflow is best practice, +different version, such as 3.21.4. This publishing workflow is best practice, and most publishers use this tagging strategy, but it isn't enforced.
The downside with this is that you're not guaranteed to get the same for every @@ -213,16 +213,16 @@ To fully secure your supply chain integrity, you can pin the image version to a specific digest. By pinning your images to a digest, you're guaranteed to always use the same image version, even if a publisher replaces the tag with a new image. For example, the following Dockerfile pins the Alpine image to the -same tag as earlier, `3.19`, but this time with a digest reference as well.
+same tag as earlier, `3.21`, but this time with a digest reference as well.
 
 ```dockerfile
 # syntax=docker/dockerfile:1
-FROM alpine:3.19@sha256:13b7e62e8df80264dbb747995705a986aa530415763a6c58f84a3ca8af9a5bcd
+FROM alpine:3.21@sha256:a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c
 ```
 
-With this Dockerfile, even if the publisher updates the `3.19` tag, your builds
+With this Dockerfile, even if the publisher updates the `3.21` tag, your builds
 would still use the pinned image version:
-`13b7e62e8df80264dbb747995705a986aa530415763a6c58f84a3ca8af9a5bcd`.
+`a8560b36e8b8210634f77d9f7f9efd7ffa463e380b75e2e74aff4511df3ef88c`.
 
 While this helps you avoid unexpected changes, it's also more tedious to have
 to look up and include the image digest for base image versions manually each
@@ -257,6 +257,11 @@ automatically build and tag a Docker image and test it.
 Follow these recommendations on how to properly use the [Dockerfile
 instructions](/reference/dockerfile.md) to create an efficient and maintainable
 Dockerfile.
 
+> [!TIP]
+>
+> Want a better editing experience for Dockerfiles in VS Code?
+> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning.
+
 ### FROM
 
 Whenever possible, use current official images as the basis for your
@@ -437,7 +442,7 @@ reduces the image size, since the apt cache isn't stored in a layer. Since the
 `RUN` statement starts with `apt-get update`, the package cache is always
 refreshed prior to `apt-get install`.
 
-Official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/moby/moby/blob/03e2923e42446dbb830c654d0eec323a0b4ef02a/contrib/mkimage/debootstrap#L82-L105), so explicit invocation is not required.
+Official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/debuerreotype/debuerreotype/blob/c9542ab785e72696eb2908a6dbc9220abbabef39/scripts/debuerreotype-minimizing-config#L87-L109), so explicit invocation is not required.
 
 #### Using pipes
 
diff --git a/content/manuals/build/building/multi-platform.md b/content/manuals/build/building/multi-platform.md
index 175cddb470f0..e60db90b3808 100644
--- a/content/manuals/build/building/multi-platform.md
+++ b/content/manuals/build/building/multi-platform.md
@@ -468,7 +468,7 @@ Steps:
 
 WORKDIR /app
 ADD https://github.com/dvdksn/buildme.git#eb6279e0ad8a10003718656c6867539bd9426ad8 .
-RUN go build -o server .
- RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o server .
+
+RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o server .
 
 FROM alpine
 COPY --from=build /app/server /server
diff --git a/content/manuals/build/building/variables.md b/content/manuals/build/building/variables.md
index 1812bd02e6e9..28662a9b19e3 100644
--- a/content/manuals/build/building/variables.md
+++ b/content/manuals/build/building/variables.md
@@ -72,7 +72,7 @@ see [`ENV` usage example](#env-usage-example).
 
 Build arguments are commonly used to specify versions of components, such as
 image variants or package versions, used in a build.
 
-Specifying versions as build arguments lets build with different versions
+Specifying versions as build arguments lets you build with different versions
 without having to manually update the Dockerfile. It also makes it easier to
 maintain the Dockerfile, since it lets you declare versions at the top of the
 file. 
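+
+For example, a Dockerfile along these lines (the `ALPINE_VERSION` argument
+name and its default value are illustrative) declares the version once at the
+top of the file:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+
+# Declare the version once; callers can override it with --build-arg
+ARG ALPINE_VERSION=3.21
+FROM alpine:${ALPINE_VERSION}
+```
+
+To build with a different version without editing the Dockerfile:
+
+```console
+$ docker build --build-arg ALPINE_VERSION=3.20 .
+```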
@@ -304,26 +304,28 @@ Note that these variables aren't used to configure the build container; they aren't available inside the build and they have no relation to the `ENV` instruction. They're used to configure the Buildx client, or the BuildKit daemon. -| Variable | Type | Description | -| --------------------------------------------------------------------------- | ----------------- | ------------------------------------------------------------ | -| [BUILDKIT_COLORS](#buildkit_colors) | String | Configure text color for the terminal output. | -| [BUILDKIT_HOST](#buildkit_host) | String | Specify host to use for remote builders. | -| [BUILDKIT_PROGRESS](#buildkit_progress) | String | Configure type of progress output. | -| [BUILDKIT_TTY_LOG_LINES](#buildkit_tty_log_lines) | String | Number of log lines (for active steps in TTY mode). | -| [BUILDX_BAKE_GIT_AUTH_HEADER](#buildx_bake_git_auth_header) | String | HTTP authentication scheme for remote Bake files. | -| [BUILDX_BAKE_GIT_AUTH_TOKEN](#buildx_bake_git_auth_token) | String | HTTP authentication token for remote Bake files. | -| [BUILDX_BAKE_GIT_SSH](#buildx_bake_git_ssh) | String | SSH authentication for remote Bake files. | -| [BUILDX_BUILDER](#buildx_builder) | String | Specify the builder instance to use. | -| [BUILDX_CONFIG](#buildx_config) | String | Specify location for configuration, state, and logs. | -| [BUILDX_CPU_PROFILE](#buildx_cpu_profile) | String | Generate a `pprof` CPU profile at the specified location. | -| [BUILDX_EXPERIMENTAL](#buildx_experimental) | Boolean | Turn on experimental features. | -| [BUILDX_GIT_CHECK_DIRTY](#buildx_git_check_dirty) | Boolean | Enable dirty Git checkout detection. | -| [BUILDX_GIT_INFO](#buildx_git_info) | Boolean | Remove Git information in provenance attestations. | -| [BUILDX_GIT_LABELS](#buildx_git_labels) | String \| Boolean | Add Git provenance labels to images. | -| [BUILDX_MEM_PROFILE](#buildx_mem_profile) | String | Generate a `pprof` memory profile at the specified location. | -| [BUILDX_NO_DEFAULT_ATTESTATIONS](#buildx_no_default_attestations) | Boolean | Turn off default provenance attestations. | -| [BUILDX_NO_DEFAULT_LOAD](#buildx_no_default_load) | Boolean | Turn off loading images to image store by default. | -| [EXPERIMENTAL_BUILDKIT_SOURCE_POLICY](#experimental_buildkit_source_policy) | String | Specify a BuildKit source policy file. | +| Variable | Type | Description | +|-----------------------------------------------------------------------------|-------------------|------------------------------------------------------------------| +| [BUILDKIT_COLORS](#buildkit_colors) | String | Configure text color for the terminal output. | +| [BUILDKIT_HOST](#buildkit_host) | String | Specify host to use for remote builders. | +| [BUILDKIT_PROGRESS](#buildkit_progress) | String | Configure type of progress output. | +| [BUILDKIT_TTY_LOG_LINES](#buildkit_tty_log_lines) | String | Number of log lines (for active steps in TTY mode). | +| [BUILDX_BAKE_GIT_AUTH_HEADER](#buildx_bake_git_auth_header) | String | HTTP authentication scheme for remote Bake files. | +| [BUILDX_BAKE_GIT_AUTH_TOKEN](#buildx_bake_git_auth_token) | String | HTTP authentication token for remote Bake files. | +| [BUILDX_BAKE_GIT_SSH](#buildx_bake_git_ssh) | String | SSH authentication for remote Bake files. | +| [BUILDX_BUILDER](#buildx_builder) | String | Specify the builder instance to use. | +| [BUILDX_CONFIG](#buildx_config) | String | Specify location for configuration, state, and logs. 
|
+| [BUILDX_CPU_PROFILE](#buildx_cpu_profile)                                   | String            | Generate a `pprof` CPU profile at the specified location.        |
+| [BUILDX_EXPERIMENTAL](#buildx_experimental)                                 | Boolean           | Turn on experimental features.                                   |
+| [BUILDX_GIT_CHECK_DIRTY](#buildx_git_check_dirty)                           | Boolean           | Enable dirty Git checkout detection.                             |
+| [BUILDX_GIT_INFO](#buildx_git_info)                                         | Boolean           | Remove Git information in provenance attestations.               |
+| [BUILDX_GIT_LABELS](#buildx_git_labels)                                     | String \| Boolean | Add Git provenance labels to images.                             |
+| [BUILDX_MEM_PROFILE](#buildx_mem_profile)                                   | String            | Generate a `pprof` memory profile at the specified location.     |
+| [BUILDX_METADATA_PROVENANCE](#buildx_metadata_provenance)                   | String \| Boolean | Customize provenance information included in the metadata file.  |
+| [BUILDX_METADATA_WARNINGS](#buildx_metadata_warnings)                       | String            | Include build warnings in the metadata file.                     |
+| [BUILDX_NO_DEFAULT_ATTESTATIONS](#buildx_no_default_attestations)           | Boolean           | Turn off default provenance attestations.                        |
+| [BUILDX_NO_DEFAULT_LOAD](#buildx_no_default_load)                           | Boolean           | Turn off loading images to the image store by default.           |
+| [EXPERIMENTAL_BUILDKIT_SOURCE_POLICY](#experimental_buildkit_source_policy) | String            | Specify a BuildKit source policy file.                           |
 
 BuildKit also supports a few additional configuration parameters. Refer to
 [BuildKit built-in build args](/reference/dockerfile.md#buildkit-built-in-build-args).
@@ -352,7 +354,7 @@ Setting `NO_COLOR` to anything turns off colorized output, as recommended by
 
 ### BUILDKIT_HOST
 
-{{< introduced buildx 0.9.0 "../release-notes.md#090" >}}
+{{< summary-bar feature_name="Buildkit host" >}}
 
 You use the `BUILDKIT_HOST` to specify the address of a BuildKit daemon to use
 as a remote builder. This is the same as specifying the address as a positional
@@ -438,7 +440,7 @@ Example:
 
 ### BUILDX_BAKE_GIT_AUTH_HEADER
 
-{{< introduced buildx 0.14.0 >}}
+{{< summary-bar feature_name="Buildx bake Git auth token" >}}
 
 Sets the HTTP authentication scheme when using a remote Bake definition in a
 private Git repository. This is equivalent to the [`GIT_AUTH_HEADER` secret](./secrets#http-authentication-scheme),
@@ -453,7 +455,7 @@ $ export BUILDX_BAKE_GIT_AUTH_HEADER=basic
 
 ### BUILDX_BAKE_GIT_AUTH_TOKEN
 
-{{< introduced buildx 0.14.0 >}}
+{{< summary-bar feature_name="Buildx bake Git auth token" >}}
 
 Sets the HTTP authentication token when using a remote Bake definition in a
 private Git repository. This is equivalent to the [`GIT_AUTH_TOKEN` secret](./secrets#git-authentication-for-remote-contexts),
@@ -467,7 +469,7 @@ $ export BUILDX_BAKE_GIT_AUTH_TOKEN=$(cat git-token.txt)
 
 ### BUILDX_BAKE_GIT_SSH
 
-{{< introduced buildx 0.14.0 >}}
+{{< summary-bar feature_name="Buildx bake Git SSH" >}}
 
 Lets you specify a list of SSH agent socket filepaths to forward to Bake
 for authenticating to a Git server when using a remote Bake definition in a private
 repository.
@@ -512,7 +514,7 @@ $ export BUILDX_CONFIG=/usr/local/etc
 
 ### BUILDX_CPU_PROFILE
 
-{{< introduced buildx 0.18.0 >}}
+{{< summary-bar feature_name="Buildx CPU profile" >}}
 
 If specified, Buildx generates a `pprof` CPU profile at the specified location.
 
@@ -538,7 +540,7 @@ $ export BUILDX_EXPERIMENTAL=1
 
 ### BUILDX_GIT_CHECK_DIRTY
 
-{{< introduced buildx 0.10.4 "../release-notes.md#0104" >}}
+{{< summary-bar feature_name="Buildx Git check dirty" >}}
 
 When set to true, checks for dirty state in source control information for
 [provenance attestations](/manuals/build/metadata/attestations/slsa-provenance.md). 
@@ -551,7 +553,7 @@ $ export BUILDX_GIT_CHECK_DIRTY=1
 
 ### BUILDX_GIT_INFO
 
-{{< introduced buildx 0.10.0 "../release-notes.md#0100" >}}
+{{< summary-bar feature_name="Buildx Git info" >}}
 
 When set to false, removes source control information from
 [provenance attestations](/manuals/build/metadata/attestations/slsa-provenance.md).
@@ -564,7 +566,7 @@ $ export BUILDX_GIT_INFO=0
 
 ### BUILDX_GIT_LABELS
 
-{{< introduced buildx 0.10.0 "../release-notes.md#0100" >}}
+{{< summary-bar feature_name="Buildx Git labels" >}}
 
 Adds provenance labels, based on Git information, to images that you build. The
 labels are:
@@ -593,7 +595,7 @@ If the repository is in a dirty state, the `revision` gets a `-dirty` suffix.
 
 ### BUILDX_MEM_PROFILE
 
-{{< introduced buildx 0.18.0 >}}
+{{< summary-bar feature_name="Buildx mem profile" >}}
 
 If specified, Buildx generates a `pprof` memory profile at the specified
 location.
@@ -608,9 +610,29 @@ Usage:
 $ export BUILDX_MEM_PROFILE=buildx_mem.prof
 ```
 
+### BUILDX_METADATA_PROVENANCE
+
+{{< summary-bar feature_name="Buildx metadata provenance" >}}
+
+By default, Buildx includes minimal provenance information in the metadata file
+generated with the [`--metadata-file` flag](/reference/cli/docker/buildx/build/#metadata-file).
+This environment variable lets you customize the provenance information
+included in the metadata file:
+* `min` sets minimal provenance (default).
+* `max` sets full provenance.
+* `disabled`, `false`, or `0` doesn't set any provenance.
+
+### BUILDX_METADATA_WARNINGS
+
+{{< summary-bar feature_name="Buildx metadata warnings" >}}
+
+By default, Buildx doesn't include build warnings in the metadata file
+generated with the [`--metadata-file` flag](/reference/cli/docker/buildx/build/#metadata-file).
+Set this environment variable to `1` or `true` to include them.
+
 ### BUILDX_NO_DEFAULT_ATTESTATIONS
 
-{{< introduced buildx 0.10.4 "../release-notes.md#0104" >}}
+{{< summary-bar feature_name="Buildx no default" >}}
 
 By default, BuildKit v0.11 and later adds
 [provenance attestations](/manuals/build/metadata/attestations/slsa-provenance.md) to images you
diff --git a/content/manuals/build/buildkit/_index.md b/content/manuals/build/buildkit/_index.md
index 89d75f2ad539..35afb1ee6ad9 100644
--- a/content/manuals/build/buildkit/_index.md
+++ b/content/manuals/build/buildkit/_index.md
@@ -154,7 +154,7 @@ see [GitHub issues](https://github.com/moby/buildkit/issues?q=is%3Aissue%20state
 4. Download and extract the latest BuildKit release.
 
    ```powershell
-   $version = "v0.13.1" # specify the release version, v0.13+
+   $version = "v0.22.0" # specify the release version, v0.13+
    $arch = "amd64" # arm64 binary available too
    curl.exe -LO https://github.com/moby/buildkit/releases/download/$version/buildkit-$version.windows-$arch.tar.gz
   # there could be another `.\bin` directory from containerd instructions
@@ -184,6 +184,9 @@ see [GitHub issues](https://github.com/moby/buildkit/issues?q=is%3Aissue%20state
    ```console
    > buildkitd.exe
    ```
+   > [!NOTE]
+   > If you're running a _dockerd-managed_ `containerd` process, use that instead by supplying its address:
+   > `buildkitd.exe --containerd-worker-addr "npipe:////./pipe/docker-containerd"`
 
 7. In another terminal with administrator privileges, create a remote builder
    that uses the local BuildKit daemon. 
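+
+   For example (the builder name `buildkit-exp` is illustrative, and this
+   assumes `buildkitd` is listening on its default named pipe):
+
+   ```console
+   > docker buildx create --name buildkit-exp --use --driver=remote npipe:////./pipe/buildkitd
+   ```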
diff --git a/content/manuals/build/buildkit/dockerfile-release-notes.md b/content/manuals/build/buildkit/dockerfile-release-notes.md
index 94f5ebd328bb..136135006e6b 100644
--- a/content/manuals/build/buildkit/dockerfile-release-notes.md
+++ b/content/manuals/build/buildkit/dockerfile-release-notes.md
@@ -13,11 +13,115 @@ issues, and bug fixes in [Dockerfile reference](/reference/dockerfile.md).
 
 For usage, see the [Dockerfile frontend syntax](frontend.md) page.
 
+## 1.17.0
+
+{{< release-date date="2025-06-17" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.17.0).
+
+```dockerfile
+# syntax=docker/dockerfile:1.17.0
+```
+
+* Add `ADD --unpack=bool` to control whether archives from a URL path are unpacked. The default is to detect unpack behavior based on the source path, as in previous versions. [moby/buildkit#5991](https://github.com/moby/buildkit/pull/5991)
+* Add support for `ADD --chown` when unpacking archives, similar to when copying regular files. [moby/buildkit#5987](https://github.com/moby/buildkit/pull/5987)
+
+## 1.16.0
+
+{{< release-date date="2025-05-22" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.16.0).
+
+```dockerfile
+# syntax=docker/dockerfile:1.16.0
+```
+
+* `ADD --checksum` support for Git URLs. [moby/buildkit#5975](https://github.com/moby/buildkit/pull/5975)
+* Allow whitespace in heredocs. [moby/buildkit#5817](https://github.com/moby/buildkit/pull/5817)
+* `WORKDIR` now supports `SOURCE_DATE_EPOCH`. [moby/buildkit#5960](https://github.com/moby/buildkit/pull/5960)
+* Leave the default PATH environment variable set by the base image for WCOW. [moby/buildkit#5895](https://github.com/moby/buildkit/pull/5895)
+
+## 1.15.1
+
+{{< release-date date="2025-03-30" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.15.1).
+
+```dockerfile
+# syntax=docker/dockerfile:1.15.1
+```
+
+* Fix `no scan targets for linux/arm64/v8` when `--attest type=sbom` is used. [moby/buildkit#5941](https://github.com/moby/buildkit/pull/5941)
+
+## 1.15.0
+
+{{< release-date date="2025-04-15" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.15.0).
+
+```dockerfile
+# syntax=docker/dockerfile:1.15.0
+```
+
+- Build errors for invalid targets now show suggestions for possible correct names. [moby/buildkit#5851](https://github.com/moby/buildkit/pull/5851)
+- Fix SBOM attestation producing an error for Windows targets. [moby/buildkit#5837](https://github.com/moby/buildkit/pull/5837)
+- Fix recursive `ARG` producing an infinite loop when processing an outline request. [moby/buildkit#5823](https://github.com/moby/buildkit/pull/5823)
+- Fix parsing syntax directive from JSON that would fail if the JSON had data types other than strings. [moby/buildkit#5815](https://github.com/moby/buildkit/pull/5815)
+- Fix platform in image config being in unnormalized form (regression from 1.12). [moby/buildkit#5776](https://github.com/moby/buildkit/pull/5776)
+- Fix copying into the destination directory when the directory is not present with WCOW. [moby/buildkit#5249](https://github.com/moby/buildkit/pull/5249)
+
+## 1.14.1
+
+{{< release-date date="2025-03-05" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.1).
+
+```dockerfile
+# syntax=docker/dockerfile:1.14.1
+```
+
+- Normalize platform in image config. [moby/buildkit#5776](https://github.com/moby/buildkit/pull/5776)
+
+## 1.14.0
+
+{{< release-date date="2025-02-19" >}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.0).
+
+```dockerfile
+# syntax=docker/dockerfile:1.14.0
+```
+
+- `COPY --chmod` now allows non-octal values. This feature was previously in the labs channel and is now available in the main release. [moby/buildkit#5734](https://github.com/moby/buildkit/pull/5734)
+- Fix handling of the OSVersion platform property if one is set by the base image. [moby/buildkit#5714](https://github.com/moby/buildkit/pull/5714)
+- Fix errors where named context metadata could be resolved even if it was not reachable by the current build configuration. [moby/buildkit#5688](https://github.com/moby/buildkit/pull/5688)
+
+## 1.14.0 (labs)
+
+{{< release-date date="2025-02-19" >}}
+
+{{% include "dockerfile-labs-channel.md" %}}
+
+The full release notes for this release are available
+[on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.14.0-labs).
+
+```dockerfile
+# syntax=docker.io/docker/dockerfile-upstream:1.14.0-labs
+```
+
+- New `RUN --device=name,[required]` flag lets builds request that CDI devices be available to the build step. Requires BuildKit v0.20.0 or later. [moby/buildkit#4056](https://github.com/moby/buildkit/pull/4056), [moby/buildkit#5738](https://github.com/moby/buildkit/pull/5738)
+
 ## 1.13.0
 
 {{< release-date date="2025-01-20" >}}
 
-The full release note for this release is available
+The full release notes for this release are available
 [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.13.0).
 
 ```dockerfile
@@ -31,11 +135,13 @@ The full release note for this release is available
 - Fix case where `ONBUILD` command may have run twice on inherited stage. [moby/buildkit#5593](https://github.com/moby/buildkit/pull/5593)
 - Fix possible missing named context replacement for child stages in Dockerfile. [moby/buildkit#5596](https://github.com/moby/buildkit/pull/5596)
 
-## 1.13.0-labs
+## 1.13.0 (labs)
 
 {{< release-date date="2025-01-20" >}}
 
-The full release note for this release is available
+{{% include "dockerfile-labs-channel.md" %}}
+
+The full release notes for this release are available
 [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.13.0-labs).
 
 ```dockerfile
@@ -48,7 +154,7 @@ The full release note for this release is available
 
 {{< release-date date="2024-11-27" >}}
 
-The full release note for this release is available
+The full release notes for this release are available
 [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.12.0).
 
 ```dockerfile
@@ -63,7 +169,7 @@ The full release note for this release is available
 
 {{< release-date date="2024-11-08" >}}
 
-The full release note for this release is available
+The full release notes for this release are available
 [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.11.1). 
```dockerfile @@ -78,7 +184,7 @@ The full release note for this release is available {{< release-date date="2024-10-30" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.11.0). ```dockerfile @@ -93,7 +199,9 @@ The full release note for this release is available - Enhanced progress output for secret values mounted as environment variables. [moby/buildkit#5336] - Added built-in build argument `TARGETSTAGE` to expose the name of the (final) target stage for the current build. [moby/buildkit#5431] -### 1.11.0-labs +## 1.11.0 (labs) + +{{% include "dockerfile-labs-channel.md" %}} - `COPY --chmod` now supports non-octal values. [moby/buildkit#5380] @@ -110,7 +218,7 @@ The full release note for this release is available {{< release-date date="2024-09-10" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.10.0). ```dockerfile @@ -137,7 +245,7 @@ The full release note for this release is available {{< release-date date="2024-07-11" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.9.0). ```dockerfile @@ -160,7 +268,7 @@ The full release note for this release is available {{< release-date date="2024-06-18" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.8.1). ```dockerfile @@ -177,7 +285,7 @@ The full release note for this release is available {{< release-date date="2024-06-11" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/moby/buildkit/releases/tag/dockerfile%2F1.8.0). 
```dockerfile @@ -277,7 +385,7 @@ The following features have graduated from the labs channel to stable: {{< release-date date="2023-01-10" >}} -{{< include "dockerfile-labs-channel.md" >}} +{{% include "dockerfile-labs-channel.md" %}} ### New @@ -371,7 +479,7 @@ The following features have graduated from the labs channel to stable: {{< release-date date="2021-07-16" >}} -{{< include "dockerfile-labs-channel.md" >}} +{{% include "dockerfile-labs-channel.md" %}} ### New @@ -402,7 +510,7 @@ The following features have graduated from the labs channel to stable: {{< release-date date="2020-12-12" >}} -{{< include "dockerfile-labs-channel.md" >}} +{{% include "dockerfile-labs-channel.md" %}} ### Bug fixes and enhancements @@ -423,7 +531,7 @@ The following features have graduated from the labs channel to stable: {{< release-date date="2020-12-03" >}} -{{< include "dockerfile-labs-channel.md" >}} +{{% include "dockerfile-labs-channel.md" %}} ### Bug fixes and enhancements @@ -456,11 +564,11 @@ The following features have graduated from the labs channel to stable: - Forward `FrontendInputs` to the gateway -## 1.1.2 (experimental) +## 1.1.2 (labs) {{< release-date date="2019-07-31" >}} -{{< include "dockerfile-labs-channel.md" >}} +{{% include "dockerfile-labs-channel.md" %}} ### Bug fixes and enhancements diff --git a/content/manuals/build/cache/backends/_index.md b/content/manuals/build/cache/backends/_index.md index 6dd4a8a9dee9..3606910a44dd 100644 --- a/content/manuals/build/cache/backends/_index.md +++ b/content/manuals/build/cache/backends/_index.md @@ -81,12 +81,11 @@ $ docker buildx build --push -t / \ ## Multiple caches -BuildKit currently only supports -[a single cache exporter](https://github.com/moby/buildkit/pull/3024). But you -can import from as many remote caches as you like. For example, a common pattern -is to use the cache of both the current branch and the main branch. The -following example shows importing cache from multiple locations using the -registry cache backend: +BuildKit supports multiple cache exporters, allowing you to push cache to more +than one destination. You can also import from as many remote caches as you'd +like. For example, a common pattern is to use the cache of both the current +branch and the main branch. The following example shows importing cache from +multiple locations using the registry cache backend: ```console $ docker buildx build --push -t / \ @@ -180,3 +179,6 @@ $ docker buildx build --push -t / \ --cache-to type=registry,ref=/,oci-mediatypes=true,image-manifest=true \ --cache-from type=registry,ref=/ . ``` + +> [!NOTE] +> Since BuildKit v0.21, `image-manifest` is enabled by default. diff --git a/content/manuals/build/cache/backends/gha.md b/content/manuals/build/cache/backends/gha.md index e5de3accdae5..c5bab63d0b66 100644 --- a/content/manuals/build/cache/backends/gha.md +++ b/content/manuals/build/cache/backends/gha.md @@ -30,16 +30,17 @@ $ docker buildx build --push -t / \ The following table describes the available CSV parameters that you can pass to `--cache-to` and `--cache-from`. -| Name | Option | Type | Default | Description | -| -------------- | ----------------------- | ----------- | ------------------------ | -------------------------------------------------------------------- | -| `url` | `cache-to`,`cache-from` | String | `$ACTIONS_CACHE_URL` | Cache server URL, see [authentication][1]. | -| `token` | `cache-to`,`cache-from` | String | `$ACTIONS_RUNTIME_TOKEN` | Access token, see [authentication][1]. 
|
-| `scope`        | `cache-to`,`cache-from` | String      | `buildkit`               | Which scope cache object belongs to, see [scope][2]                   |
-| `mode`         | `cache-to`              | `min`,`max` | `min`                    | Cache layers to export, see [cache mode][3].                          |
-| `ignore-error` | `cache-to`              | Boolean     | `false`                  | Ignore errors caused by failed cache exports.                         |
-| `timeout`      | `cache-to`,`cache-from` | String      | `10m`                    | Max duration for importing or exporting cache before it's timed out.  |
-| `repository`   | `cache-to`              | String      |                          | GitHub repository used for cache storage.                             |
-| `ghtoken`      | `cache-to`              | String      |                          | GitHub token required for accessing the GitHub API.                   |
+| Name           | Option                  | Type        | Default                                        | Description                                                           |
+|----------------|-------------------------|-------------|------------------------------------------------|-----------------------------------------------------------------------|
+| `url`          | `cache-to`,`cache-from` | String      | `$ACTIONS_CACHE_URL` or `$ACTIONS_RESULTS_URL` | Cache server URL, see [authentication][1].                            |
+| `url_v2`       | `cache-to`,`cache-from` | String      | `$ACTIONS_RESULTS_URL`                         | Cache v2 server URL, see [authentication][1].                         |
+| `token`        | `cache-to`,`cache-from` | String      | `$ACTIONS_RUNTIME_TOKEN`                       | Access token, see [authentication][1].                                |
+| `scope`        | `cache-to`,`cache-from` | String      | `buildkit`                                     | Which scope the cache object belongs to, see [scope][2].              |
+| `mode`         | `cache-to`              | `min`,`max` | `min`                                          | Cache layers to export, see [cache mode][3].                          |
+| `ignore-error` | `cache-to`              | Boolean     | `false`                                        | Ignore errors caused by failed cache exports.                         |
+| `timeout`      | `cache-to`,`cache-from` | String      | `10m`                                          | Max duration for importing or exporting cache before it's timed out.  |
+| `repository`   | `cache-to`              | String      |                                                | GitHub repository used for cache storage.                             |
+| `ghtoken`      | `cache-to`              | String      |                                                | GitHub token required for accessing the GitHub API.                   |
 
 [1]: #authentication
 [2]: #scope
@@ -47,10 +48,10 @@ The following table describes the available CSV parameters that you can pass to
 
 ## Authentication
 
-If the `url` or `token` parameters are left unspecified, the `gha` cache backend
-will fall back to using environment variables. If you invoke the `docker buildx`
-command manually from an inline step, then the variables must be manually
-exposed. Consider using the
+If the `url`, `url_v2`, or `token` parameters are left unspecified, the `gha`
+cache backend falls back to using environment variables. If you invoke the
+`docker buildx` command manually from an inline step, you must expose the
+variables manually. Consider using the
 [`crazy-max/ghaction-github-runtime`](https://github.com/crazy-max/ghaction-github-runtime),
 GitHub Action as a helper for exposing the variables.
diff --git a/content/manuals/build/cache/backends/local.md b/content/manuals/build/cache/backends/local.md
index 248c6d764c89..69f32107c3d5 100644
--- a/content/manuals/build/cache/backends/local.md
+++ b/content/manuals/build/cache/backends/local.md
@@ -25,13 +25,13 @@ The following table describes the available CSV parameters that you can pass to
 `--cache-to` and `--cache-from`. 
| Name | Option | Type | Default | Description | -| ------------------- | ------------ | ----------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------- | +|---------------------|--------------|-------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------| | `src` | `cache-from` | String | | Path of the local directory where cache gets imported from. | | `digest` | `cache-from` | String | | Digest of manifest to import, see [cache versioning][4]. | | `dest` | `cache-to` | String | | Path of the local directory where cache gets exported to. | | `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][1]. | | `oci-mediatypes` | `cache-to` | `true`,`false` | `true` | Use OCI media types in exported manifests, see [OCI media types][2]. | -| `image-manifest` | `cache-to` | `true`,`false` | `false` | When using OCI media types, generate an image manifest instead of an image index for the cache image, see [OCI media types][2]. | +| `image-manifest` | `cache-to` | `true`,`false` | `true` | When using OCI media types, generate an image manifest instead of an image index for the cache image, see [OCI media types][2]. | | `compression` | `cache-to` | `gzip`,`estargz`,`zstd` | `gzip` | Compression type, see [cache compression][3]. | | `compression-level` | `cache-to` | `0..22` | | Compression level, see [cache compression][3]. | | `force-compression` | `cache-to` | `true`,`false` | `false` | Forcibly apply compression, see [cache compression][3]. | @@ -81,7 +81,7 @@ and kept indefinitely. Therefore, the size of the local cache will continue to grow (see [`moby/buildkit#1896`](https://github.com/moby/buildkit/issues/1896) for more information). -When importing cache using `--cache-to`, you can specify the `digest` parameter +When importing cache using `--cache-from`, you can specify the `digest` parameter to force loading an older version of the cache, for example: ```console diff --git a/content/manuals/build/cache/backends/registry.md b/content/manuals/build/cache/backends/registry.md index 9a4ff0d1a027..ce9a7d4ee0e3 100644 --- a/content/manuals/build/cache/backends/registry.md +++ b/content/manuals/build/cache/backends/registry.md @@ -37,11 +37,11 @@ The following table describes the available CSV parameters that you can pass to `--cache-to` and `--cache-from`. | Name | Option | Type | Default | Description | -| ------------------- | ----------------------- | ----------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------- | +|---------------------|-------------------------|-------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------| | `ref` | `cache-to`,`cache-from` | String | | Full name of the cache image to import. | | `mode` | `cache-to` | `min`,`max` | `min` | Cache layers to export, see [cache mode][1]. | | `oci-mediatypes` | `cache-to` | `true`,`false` | `true` | Use OCI media types in exported manifests, see [OCI media types][2]. | -| `image-manifest` | `cache-to` | `true`,`false` | `false` | When using OCI media types, generate an image manifest instead of an image index for the cache image, see [OCI media types][2]. 
|
+| `image-manifest`    | `cache-to`              | `true`,`false`          | `true`  | When using OCI media types, generate an image manifest instead of an image index for the cache image, see [OCI media types][2].  |
 | `compression`       | `cache-to`              | `gzip`,`estargz`,`zstd` | `gzip`  | Compression type, see [cache compression][3].                                                                                     |
 | `compression-level` | `cache-to`              | `0..22`                 |         | Compression level, see [cache compression][3].                                                                                    |
 | `force-compression` | `cache-to`              | `true`,`false`          | `false` | Forcibly apply compression, see [cache compression][3].                                                                           |
diff --git a/content/manuals/build/checks.md b/content/manuals/build/checks.md
index b741f71800e7..afbf82392668 100644
--- a/content/manuals/build/checks.md
+++ b/content/manuals/build/checks.md
@@ -36,6 +36,11 @@ Build checks are useful for:
 - Identifying potential issues or anti-patterns in your Dockerfile and build
   options.
 
+> [!TIP]
+>
+> Want a better editing experience for Dockerfiles in VS Code?
+> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning.
+
 ## Build with checks
 
 Build checks are supported in:
diff --git a/content/manuals/build/ci/github-actions/_index.md b/content/manuals/build/ci/github-actions/_index.md
index 444bc3bc3f7e..4f80a110be52 100644
--- a/content/manuals/build/ci/github-actions/_index.md
+++ b/content/manuals/build/ci/github-actions/_index.md
@@ -17,17 +17,22 @@ The following GitHub Actions are available:
 
 - [Build and push Docker images](https://github.com/marketplace/actions/build-and-push-docker-images):
   build and push Docker images with BuildKit.
+- [Docker Buildx Bake](https://github.com/marketplace/actions/docker-buildx-bake):
+  enables using high-level builds with [Bake](../../bake/_index.md).
 - [Docker Login](https://github.com/marketplace/actions/docker-login):
   sign in to a Docker registry.
 - [Docker Setup Buildx](https://github.com/marketplace/actions/docker-setup-buildx):
-  initiates a BuildKit builder.
+  creates and boots a BuildKit builder.
 - [Docker Metadata action](https://github.com/marketplace/actions/docker-metadata-action):
-  extracts metadata from Git reference and GitHub events.
+  extracts metadata from the Git reference and GitHub events to generate tags,
+  labels, and annotations.
+- [Docker Setup Compose](https://github.com/marketplace/actions/docker-setup-compose):
+  installs and sets up [Compose](../../../compose/_index.md).
+- [Docker Setup Docker](https://github.com/marketplace/actions/docker-setup-docker):
+  installs Docker CE.
 - [Docker Setup QEMU](https://github.com/marketplace/actions/docker-setup-qemu):
-  installs [QEMU](https://github.com/qemu/qemu) static binaries for multi-arch
-  builds.
-- [Docker Buildx Bake](https://github.com/marketplace/actions/docker-buildx-bake):
-  enables using high-level builds with [Bake](../../bake/_index.md).
+  installs [QEMU](https://github.com/qemu/qemu) static binaries for
+  multi-platform builds.
 - [Docker Scout](https://github.com/docker/scout-action):
   analyze Docker images for security vulnerabilities. 
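+
+A minimal workflow sketch showing how several of these actions typically fit
+together (the image name `user/app` and the variable and secret names are
+placeholders):
+
+```yaml
+name: ci
+
+on:
+  push:
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        with:
+          push: true
+          tags: user/app:latest
+```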
diff --git a/content/manuals/build/ci/github-actions/build-summary.md b/content/manuals/build/ci/github-actions/build-summary.md
index a2e74b5093c3..9472ead8e685 100644
--- a/content/manuals/build/ci/github-actions/build-summary.md
+++ b/content/manuals/build/ci/github-actions/build-summary.md
@@ -67,7 +67,7 @@ in the YAML configuration for your build step:
 
 ```yaml {hl_lines=4}
 - name: Build
-  uses: docker/docker-build-push-action@v6
+  uses: docker/build-push-action@v6
   env:
     DOCKER_BUILD_SUMMARY: false
   with:
@@ -83,7 +83,7 @@ your build step:
 
 ```yaml {hl_lines=4}
 - name: Build
-  uses: docker/docker-build-push-action@v6
+  uses: docker/build-push-action@v6
   env:
     DOCKER_BUILD_RECORD_UPLOAD: false
   with:
diff --git a/content/manuals/build/ci/github-actions/cache.md b/content/manuals/build/ci/github-actions/cache.md
index f005c97cd5c4..5626447e1e43 100644
--- a/content/manuals/build/ci/github-actions/cache.md
+++ b/content/manuals/build/ci/github-actions/cache.md
@@ -87,9 +87,9 @@ jobs:
 
 {{< summary-bar feature_name="Cache backend API" >}}
 
 The [GitHub Actions cache exporter](../../cache/backends/gha.md)
-backend uses the [GitHub Cache API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md)
+backend uses the [GitHub Cache service API](https://github.com/tonistiigi/go-actions-cache)
 to fetch and upload cache blobs. That's why you should only use this cache
-backend in a GitHub Action workflow, as the `url` (`$ACTIONS_CACHE_URL`) and
+backend in a GitHub Action workflow, as the `url` (`$ACTIONS_RESULTS_URL`) and
 `token` (`$ACTIONS_RUNTIME_TOKEN`) attributes only get populated in a workflow
 context.
@@ -121,6 +121,64 @@ jobs:
         cache-to: type=gha,mode=max
 ```
 
+> [!IMPORTANT]
+>
+> Starting [April 15, 2025, only GitHub Cache service API v2 will be supported](https://gh.io/gha-cache-sunset).
+>
+> If you encounter the following error during your build:
+>
+> ```console
+> ERROR: failed to solve: This legacy service is shutting down, effective April 15, 2025. Migrate to the new service ASAP. For more information: https://gh.io/gha-cache-sunset
+> ```
+>
+> You're probably using outdated tools that only support the legacy GitHub
+> Cache service API v1. Here are the minimum versions you need to upgrade to,
+> depending on your use case:
+> * Docker Buildx >= v0.21.0
+> * BuildKit >= v0.20.0
+> * Docker Compose >= v2.33.1
+> * Docker Engine >= v28.0.0 (if you're building using the Docker driver with containerd image store enabled)
+>
+> If you're building using the `docker/build-push-action` or `docker/bake-action`
+> actions on GitHub-hosted runners, Docker Buildx and BuildKit are already up
+> to date, but on self-hosted runners you might need to update them yourself. 
+> Alternatively, you can use the `docker/setup-buildx-action` action to install +> the latest version of Docker Buildx: +> +> ```yaml +> - name: Set up Docker Buildx +> uses: docker/setup-buildx-action@v3 +> with: +> version: latest +> ``` +> +> If you're building using Docker Compose, you can use the +> `docker/setup-compose-action` action: +> +> ```yaml +> - name: Set up Docker Compose +> uses: docker/setup-compose-action@v1 +> with: +> version: latest +> ``` +> +> If you're building using the Docker Engine with the containerd image store +> enabled, you can use the `docker/setup-docker-action` action: +> +> ```yaml +> - +> name: Set up Docker +> uses: docker/setup-docker-action@v4 +> with: +> version: latest +> daemon-config: | +> { +> "features": { +> "containerd-snapshotter": true +> } +> } +> ``` + ### Cache mounts BuildKit doesn't preserve cache mounts in the GitHub Actions cache by default. diff --git a/content/manuals/build/ci/github-actions/multi-platform.md b/content/manuals/build/ci/github-actions/multi-platform.md index 71b48d32267e..787abccda09d 100644 --- a/content/manuals/build/ci/github-actions/multi-platform.md +++ b/content/manuals/build/ci/github-actions/multi-platform.md @@ -109,13 +109,8 @@ each platform across multiple runners and create manifest list using the The following workflow will build the image for each platform on a dedicated runner using a matrix strategy and push by digest. Then, the `merge` job will -create manifest lists and push them to two registries: - -- Docker Hub: `docker.io/docker-user/my-app` -- GitHub Container Registry: `ghcr.io/gh-user/my-app` - -This example also uses the [`metadata` action](https://github.com/docker/metadata-action) -to set tags and labels. +create manifest lists and push them to Docker Hub. The [`metadata` action](https://github.com/docker/metadata-action) +is used to set tags and labels. 
```yaml name: ci @@ -124,8 +119,7 @@ on: push: env: - DOCKERHUB_REPO: docker-user/my-app - GHCR_REPO: ghcr.io/gh-user/my-app + REGISTRY_IMAGE: user/app jobs: build: @@ -146,9 +140,7 @@ jobs: id: meta uses: docker/metadata-action@v5 with: - images: | - ${{ env.DOCKERHUB_REPO }} - ${{ env.GHCR_REPO }} + images: ${{ env.REGISTRY_IMAGE }} - name: Login to Docker Hub uses: docker/login-action@v3 @@ -156,13 +148,6 @@ jobs: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Login to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -175,7 +160,8 @@ jobs: with: platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} - outputs: type=image,"name=${{ env.DOCKERHUB_REPO }},${{ env.GHCR_REPO }}",push-by-digest=true,name-canonical=true,push=true + tags: ${{ env.REGISTRY_IMAGE }} + outputs: type=image,push-by-digest=true,name-canonical=true,push=true - name: Export digest run: | @@ -209,13 +195,6 @@ jobs: username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Login to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -223,9 +202,7 @@ jobs: id: meta uses: docker/metadata-action@v5 with: - images: | - ${{ env.DOCKERHUB_REPO }} - ${{ env.GHCR_REPO }} + images: ${{ env.REGISTRY_IMAGE }} tags: | type=ref,event=branch type=ref,event=pr @@ -236,14 +213,11 @@ jobs: working-directory: ${{ runner.temp }}/digests run: | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.DOCKERHUB_REPO }}@sha256:%s ' *) - docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.GHCR_REPO }}@sha256:%s ' *) + $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - name: Inspect image run: | - docker buildx imagetools inspect ${{ env.DOCKERHUB_REPO }}:${{ steps.meta.outputs.version }} - docker buildx imagetools inspect ${{ env.GHCR_REPO }}:${{ steps.meta.outputs.version }} + docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} ``` ### With Bake @@ -377,9 +351,9 @@ jobs: cwd://${{ runner.temp }}/bake-meta.json targets: image set: | - *.tags= + *.tags=${{ env.REGISTRY_IMAGE }} *.platform=${{ matrix.platform }} - *.output=type=image,"name=${{ env.REGISTRY_IMAGE }}",push-by-digest=true,name-canonical=true,push=true + *.output=type=image,push-by-digest=true,name-canonical=true,push=true - name: Export digest run: | diff --git a/content/manuals/build/ci/github-actions/secrets.md b/content/manuals/build/ci/github-actions/secrets.md index e24ea725d56a..e66fa497a2cd 100644 --- a/content/manuals/build/ci/github-actions/secrets.md +++ b/content/manuals/build/ci/github-actions/secrets.md @@ -14,7 +14,7 @@ Docker Build supports two forms of secrets: - [SSH mounts](#ssh-mounts) add SSH agent sockets or keys into the build container. This page shows how to use secrets with GitHub Actions. -For an introduction to secrets in general, see [Build secrets](../../building/secrets.md). +For an introduction to secrets in general, see [Build secrets](/manuals/build/building/secrets.md). 
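+
+For example, a build step along these lines (the secret ID `github_token` is
+illustrative) exposes a repository secret to the build as a secret mount:
+
+```yaml
+- name: Build
+  uses: docker/build-push-action@v6
+  with:
+    secrets: |
+      github_token=${{ secrets.GITHUB_TOKEN }}
+```
+
+The Dockerfile can then read it during a build step with
+`RUN --mount=type=secret,id=github_token`.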
## Secret mounts diff --git a/content/manuals/build/ci/github-actions/share-image-jobs.md b/content/manuals/build/ci/github-actions/share-image-jobs.md index 747dfe591bd7..0fb11c219575 100644 --- a/content/manuals/build/ci/github-actions/share-image-jobs.md +++ b/content/manuals/build/ci/github-actions/share-image-jobs.md @@ -7,6 +7,7 @@ keywords: ci, github actions, gha, buildkit, buildx As each job is isolated in its own runner, you can't use your built image between jobs, except if you're using [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) +or [Docker Build Cloud](/build-cloud). However, you can [pass data between jobs](https://docs.github.com/en/actions/using-workflows/storing-workflow-data-as-artifacts#passing-data-between-jobs-in-a-workflow) in a workflow using the [actions/upload-artifact](https://github.com/actions/upload-artifact) and [actions/download-artifact](https://github.com/actions/download-artifact) diff --git a/content/manuals/build/concepts/context.md b/content/manuals/build/concepts/context.md index cb5a41c4d443..2818bec9b7b6 100644 --- a/content/manuals/build/concepts/context.md +++ b/content/manuals/build/concepts/context.md @@ -567,3 +567,193 @@ README-secret.md All of the README files are included. The middle line has no effect because `!README*.md` matches `README-secret.md` and comes last. + +## Named contexts + +In addition to the default build context (the positional argument to the +`docker build` command), you can also pass additional named contexts to builds. + +Named contexts are specified using the `--build-context` flag, followed by a +name-value pair. This lets you include files and directories from multiple +sources during the build, while keeping them logically separated. + +```console +$ docker build --build-context docs=./docs . +``` + +In this example: + +- The named `docs` context points to the `./docs` directory. +- The default context (`.`) points to the current working directory. + +### Using named contexts in a Dockerfile + +Dockerfile instructions can reference named contexts as if they are stages in a +multi-stage build. + +For example, the following Dockerfile: + +1. Uses a `COPY` instruction to copy files from the default context into the + current build stage. +2. Bind mounts the files in a named context to process the files as part of the + build. + +```dockerfile +# syntax=docker/dockerfile:1 +FROM buildbase +WORKDIR /app + +# Copy all files from the default context into /app/src in the build container +COPY . /app/src +RUN make bin + +# Mount the files from the named "docs" context to build the documentation +RUN --mount=from=docs,target=/app/docs \ + make manpages +``` + +### Use cases for named contexts + +Using named contexts allows for greater flexibility and efficiency when +building Docker images. Here are some scenarios where using named contexts can +be useful: + +#### Example: combine local and remote sources + +You can define separate named contexts for different types of sources. For +example, consider a project where the application source code is local, but the +deployment scripts are stored in a Git repository: + +```console +$ docker build --build-context scripts=https://github.com/user/deployment-scripts.git . +``` + +In the Dockerfile, you can use these contexts independently: + +```dockerfile +# syntax=docker/dockerfile:1 +FROM alpine:latest + +# Copy application code from the main context +COPY . 
/opt/app
+
+# Run deployment scripts using the remote "scripts" context
+RUN --mount=from=scripts,target=/scripts /scripts/main.sh
+```
+
+#### Example: dynamic builds with custom dependencies
+
+In some scenarios, you might need to dynamically inject configuration files or
+dependencies into the build from external sources. Named contexts make this
+straightforward by allowing you to mount different configurations without
+modifying the default build context.
+
+```console
+$ docker build --build-context config=./configs/prod .
+```
+
+Example Dockerfile:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM nginx:alpine
+
+# Use the "config" context for environment-specific configurations
+COPY --from=config nginx.conf /etc/nginx/nginx.conf
+```
+
+#### Example: pin or override images
+
+You can refer to named contexts in a Dockerfile the same way you can refer to
+an image. That means you can change an image reference in your Dockerfile by
+overriding it with a named context. For example, given the following
+Dockerfile:
+
+```dockerfile
+FROM alpine:{{% param example_alpine_version %}}
+```
+
+If you want to force the image reference to resolve to a different version
+without changing the Dockerfile, you can pass a context with the same name to
+the build. For example:
+
+```console
+$ docker buildx build --build-context alpine:{{% param example_alpine_version %}}=docker-image://alpine:edge .
+```
+
+The `docker-image://` prefix marks the context as an image reference. The
+reference can be a local image or an image in your registry.
+
+### Named contexts with Bake
+
+[Bake](/manuals/build/bake/_index.md) is a tool built into `docker build` that
+lets you manage your build configuration with a configuration file. Bake fully
+supports named contexts.
+
+To define named contexts in a Bake file:
+
+```hcl {title=docker-bake.hcl}
+target "app" {
+  contexts = {
+    docs = "./docs"
+  }
+}
+```
+
+This is equivalent to the following CLI invocation:
+
+```console
+$ docker build --build-context docs=./docs .
+```
+
+#### Linking targets with named contexts
+
+In addition to making complex builds more manageable, Bake provides features
+beyond what you can do with `docker build` on the CLI. You can use named
+contexts to create build pipelines, where one target depends on and builds on
+top of another. For example, consider a Docker build setup where you have two
+Dockerfiles:
+
+- `base.Dockerfile`: for building a base image
+- `app.Dockerfile`: for building an application image
+
+The `app.Dockerfile` uses the image produced by `base.Dockerfile` as its base
+image:
+
+```dockerfile {title=app.Dockerfile}
+FROM mybaseimage
+```
+
+Normally, you have to build the base image first, and then either load it into
+Docker Engine's local image store or push it to a registry. With Bake, you
+can reference other targets directly, creating a dependency between the `app`
+target and the `base` target.
+
+```hcl {title=docker-bake.hcl}
+target "base" {
+  dockerfile = "base.Dockerfile"
+}
+
+target "app" {
+  dockerfile = "app.Dockerfile"
+  contexts = {
+    # the target: prefix indicates that 'base' is a Bake target
+    mybaseimage = "target:base"
+  }
+}
+```
+
+With this configuration, references to `mybaseimage` in `app.Dockerfile` use
+the results from building the `base` target. Building the `app` target also
+triggers a rebuild of `mybaseimage`, if necessary:
+
+```console
+$ docker buildx bake app
+```
+
+### Further reading
+
+For more information about working with named contexts, see:
+
+- [`--build-context` CLI reference](/reference/cli/docker/buildx/build.md#build-context)
+- [Using Bake with additional contexts](/manuals/build/bake/contexts.md)
diff --git a/content/manuals/build/concepts/dockerfile.md b/content/manuals/build/concepts/dockerfile.md
index 6a9de90499a4..18601a9b15de 100644
--- a/content/manuals/build/concepts/dockerfile.md
+++ b/content/manuals/build/concepts/dockerfile.md
@@ -278,3 +278,8 @@ $ docker run -p 127.0.0.1:8000:8000 test:latest
 
 This publishes the container's port 8000 to `http://localhost:8000` on the
 Docker host.
+
+> [!TIP]
+>
+> Want a better editing experience for Dockerfiles in VS Code?
+> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning.
diff --git a/content/manuals/build/exporters/_index.md b/content/manuals/build/exporters/_index.md
index 3aae8b944033..2921fb1ff31e 100644
--- a/content/manuals/build/exporters/_index.md
+++ b/content/manuals/build/exporters/_index.md
@@ -184,7 +184,7 @@ WARNING: No output specified with docker-container driver.
 
 ## Multiple exporters
 
-{{< introduced buildx 0.13.0 >}}
+{{< summary-bar feature_name="Build multiple exporters" >}}
 
 You can use multiple exporters for any given build by specifying the `--output`
 flag multiple times. This requires **both Buildx and BuildKit** version 0.13.0
diff --git a/content/manuals/build/exporters/image-registry.md b/content/manuals/build/exporters/image-registry.md
index f807c2980a4f..159be14265fa 100644
--- a/content/manuals/build/exporters/image-registry.md
+++ b/content/manuals/build/exporters/image-registry.md
@@ -37,6 +37,7 @@ The following table describes the available parameters that you can pass to
 | `force-compression` | `true`,`false` | `false` | Forcefully apply compression, see [compression][1] |
 | `rewrite-timestamp` | `true`,`false` | `false` | Rewrite the file timestamps to the `SOURCE_DATE_EPOCH` value. See [build reproducibility][4] for how to specify the `SOURCE_DATE_EPOCH` value. |
+| `oci-artifact` | `true`,`false` | `false` | Attestations are formatted as OCI artifacts, see [attestations as OCI artifacts][5] |
 | `unpack` | `true`,`false` | `false` | Unpack image after creation (for use with containerd) |
 | `store` | `true`,`false` | `true` | Store the result images to the worker's (for example, containerd) image store, and ensures that the image has all blobs in the content store. Ignored if the worker doesn't have image store (when using OCI workers, for example). 
| | `annotation.<key>` | String | | Attach an annotation with the respective `key` and `value` to the built image, see [annotations][3] | @@ -45,6 +46,7 @@ The following table describes the available parameters that you can pass to [2]: _index.md#oci-media-types [3]: #annotations [4]: https://github.com/moby/buildkit/blob/master/docs/build-repro.md +[5]: /manuals/build/metadata/attestations/_index.md#attestations-as-oci-artifacts ## Annotations
diff --git a/content/manuals/build/exporters/local-tar.md b/content/manuals/build/exporters/local-tar.md index dca9f3ab6561..4cec2f0c8758 100644 --- a/content/manuals/build/exporters/local-tar.md +++ b/content/manuals/build/exporters/local-tar.md @@ -25,9 +25,10 @@ $ docker buildx build --output type=tar[,parameters] . The following table describes the available parameters: -| Parameter | Type | Default | Description | -| --------- | ------ | ------- | --------------------- | -| `dest` | String | | Path to copy files to | +| Parameter | Type | Default | Description | +|------------------|---------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `dest` | String | | Path to copy files to | +| `platform-split` | Boolean | `true` | When using the local exporter with a multi-platform build, by default, a subfolder matching each target platform is created in the destination directory. Set it to `false` to merge files from all platforms into the same directory. | ## Further reading
diff --git a/content/manuals/build/images/build-variables.svg b/content/manuals/build/images/build-variables.svg index 13197975fb1c..07dab5f2d326 100644 --- a/content/manuals/build/images/build-variables.svg +++ b/content/manuals/build/images/build-variables.svg @@ -1,3 +1,3 @@ - [SVG text of the build-variables diagram, illustrating global and per-stage ARG/ENV scope with `ARG VERSION="3.19"`] + [Same diagram text, with the base image version bumped to `ARG VERSION="3.21"`]
diff --git a/content/manuals/build/metadata/annotations.md b/content/manuals/build/metadata/annotations.md index 910ce96f9d31..330deb818137 100644 --- a/content/manuals/build/metadata/annotations.md +++ b/content/manuals/build/metadata/annotations.md @@ -11,7 +11,7 @@ arbitrary information and attach it to your image, which helps consumers and tools understand the origin, contents, and how to use the image. Annotations are similar to, and in some sense overlap with, [labels]. Both -serve the same purpose: attach metadata to a resource. As a general principle, +serve the same purpose: to attach metadata to a resource. As a general principle, you can think of the difference between annotations and labels as follows: - Annotations describe OCI image components, such as [manifests], [indexes], @@ -68,7 +68,7 @@ For examples on how to add annotations to images built with GitHub Actions, see You can also add annotations to an image created using `docker buildx imagetools create`. This command only supports adding annotations to an index or manifest descriptor, see -[CLI reference](/reference/cli/docker/buildx/imagetools/create.md#annotations). +[CLI reference](/reference/cli/docker/buildx/imagetools/create.md#annotation). ## Inspect annotations
diff --git a/content/manuals/build/metadata/attestations/_index.md b/content/manuals/build/metadata/attestations/_index.md index fc9530a05b5e..1d10af81c350 100644 --- a/content/manuals/build/metadata/attestations/_index.md +++ b/content/manuals/build/metadata/attestations/_index.md @@ -70,7 +70,7 @@ $ docker buildx build --sbom=true --provenance=true . > You can disable provenance attestations using the `--provenance=false` flag, > or by setting the [`BUILDX_NO_DEFAULT_ATTESTATIONS`](/manuals/build/building/variables.md#buildx_no_default_attestations) environment variable. > -> Using the `--provenance=true` flag attaches provenance attestations with `mode=max` +> Using the `--provenance=true` flag attaches provenance attestations with `mode=min` > by default. See [Provenance attestation](./slsa-provenance.md) for more details. BuildKit generates the attestations when building the image. The attestation @@ -95,6 +95,8 @@ the attestations to an image manifest, since it's outputting a directory of files or a tarball, not an image. Instead, these exporters write the attestations to one or more JSON files in the root directory of the export. +## Example + The following example shows a truncated in-toto JSON representation of an SBOM attestation.
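A minimal sketch of that in-toto statement structure follows; every value below is an illustrative placeholder, not output from a real build:

```json
{
  "_type": "https://in-toto.io/Statement/v0.1",
  "predicateType": "https://spdx.dev/Document",
  "subject": [
    {
      "name": "pkg:docker/example/app@1.0",
      "digest": { "sha256": "<image-manifest-digest>" }
    }
  ],
  "predicate": {
    "spdxVersion": "SPDX-2.3",
    "SPDXID": "SPDXRef-DOCUMENT",
    "name": "sbom",
    "packages": [
      { "name": "openssl", "versionInfo": "3.1.4", "licenseDeclared": "Apache-2.0" }
    ]
  }
}
```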
@@ -161,6 +163,85 @@ attestation. To deep-dive into the specifics about how attestations are stored, see [Image Attestation Storage (BuildKit)](attestation-storage.md). +## Attestation manifest format + +Attestations are stored as manifests, referenced by the image's index. Each +_attestation manifest_ refers to a single _image manifest_ (one +platform-variant of the image). Attestation manifests contain a single layer, +the "value" of the attestation. + +The following example shows the structure of an attestation manifest: + +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "size": 167, + "digest": "sha256:916d7437a36dd0e258e64d9c5a373ca5c9618eeb1555e79bd82066e593f9afae" + }, + "layers": [ + { + "mediaType": "application/vnd.in-toto+json", + "size": 1833349, + "digest": "sha256:3138024b98ed5aa8e3008285a458cd25a987202f2500ce1a9d07d8e1420f5491", + "annotations": { + "in-toto.io/predicate-type": "https://spdx.dev/Document" + } + } + ] +} +``` + +### Attestations as OCI artifacts + +You can configure the format of the attestation manifest using the +[`oci-artifact` option](/manuals/build/exporters/image-registry.md#synopsis) +for the `image` and `registry` exporters. If set to `true`, the structure of +the attestation manifest changes as follows: + +- An `artifactType` field is added to the attestation manifest, with a value of `application/vnd.docker.attestation.manifest.v1+json`. +- The `config` field is an [empty descriptor] instead of a "dummy" config. +- A `subject` field is also added, pointing to the image manifest that the attestation refers to. + +[empty descriptor]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidance-for-an-empty-descriptor + +The following example shows an attestation with the OCI artifact format: + +```json +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "artifactType": "application/vnd.docker.attestation.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.empty.v1+json", + "size": 2, + "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a", + "data": "e30=" + }, + "layers": [ + { + "mediaType": "application/vnd.in-toto+json", + "size": 2208, + "digest": "sha256:6d2f2c714a6bee3cf9e4d3cb9a966b629efea2dd8556ed81f19bd597b3325286", + "annotations": { + "in-toto.io/predicate-type": "https://slsa.dev/provenance/v0.2" + } + } + ], + "subject": { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "size": 1054, + "digest": "sha256:bc2046336420a2852ecf915786c20f73c4c1b50d7803aae1fd30c971a7d1cead", + "platform": { + "architecture": "amd64", + "os": "linux" + } + } +} +``` + ## What's next Learn more about the available attestation types and how to use them: diff --git a/content/manuals/build/metadata/attestations/sbom.md b/content/manuals/build/metadata/attestations/sbom.md index ffbd5354317d..a272f724446e 100644 --- a/content/manuals/build/metadata/attestations/sbom.md +++ b/content/manuals/build/metadata/attestations/sbom.md @@ -2,15 +2,12 @@ title: SBOM attestations keywords: build, attestations, sbom, spdx, metadata, packages description: | - SBOM build attestations describe the contents of your image, - and the packages used to build it. + SBOM attestations describe what software artifacts an image contains and the artifacts used to create the image. 
aliases: - /build/attestations/sbom/ --- -Software Bill of Materials (SBOM) attestations describe what software artifacts -an image contains, and artifacts used to create the image. Metadata included in -an SBOM for describing software artifacts may include: +SBOM attestations help ensure [software supply chain transparency](/guides/docker-scout/s3c.md) by verifying the software artifacts an image contains and the artifacts used to create the image. Metadata included in an [SBOM](/guides/docker-scout/sbom.md) for describing software artifacts might include: - Name of the artifact - Version @@ -18,14 +15,9 @@ an SBOM for describing software artifacts may include: - Authors - Unique package identifier -There are benefits to indexing contents of an image during the build, as opposed -to scanning a final image. When scanning happens as part of the build, you're -able to detect software you use to build the image, that may not show up in the -final image. +Indexing the contents of an image during the build has benefits over scanning a final image. When scanning happens as part of the build, you can detect software you used to build the image, which might not show up in the final image. -The SBOMs generated by BuildKit follow the SPDX standard. SBOMs attach to the -final image as a JSON-encoded SPDX document, using the format defined by the -[in-toto SPDX predicate](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md). +Docker supports SBOM generation and attestation through an SLSA-compliant build process using BuildKit. The SBOMs generated by [BuildKit](/manuals/build/buildkit/_index.md) follow the SPDX standard and attach to the final image as a JSON-encoded SPDX document, using the format defined by the [in-toto SPDX predicate](https://github.com/in-toto/attestation/blob/main/spec/predicates/spdx.md). On this page, you’ll learn how to create, manage, and verify SBOM attestations using Docker tooling. ## Create SBOM attestations
diff --git a/content/manuals/build/metadata/attestations/slsa-provenance.md b/content/manuals/build/metadata/attestations/slsa-provenance.md index f3add2da14d6..5da2b8617aef 100644 --- a/content/manuals/build/metadata/attestations/slsa-provenance.md +++ b/content/manuals/build/metadata/attestations/slsa-provenance.md @@ -41,8 +41,8 @@ For an example on how to add provenance attestations with GitHub Actions, see ## Mode You can use the `mode` parameter to define the level of detail to be included in -the provenance attestation. Supported values are `mode=min`, and `mode=max` -(default). +the provenance attestation. Supported values are `mode=min` (default) and +`mode=max`. ### Min @@ -175,7 +175,7 @@ extract the full source code of the Dockerfile used to build the image: ```console $ docker buildx imagetools inspect <namespace>/<image>:<tag> \ --format '{{ range (index .Provenance.SLSA.metadata "https://mobyproject.org/buildkit@v1#metadata").source.infos }}{{ if eq .filename "Dockerfile" }}{{ .data }}{{ end }}{{ end }}' | base64 -d -FROM ubuntu:20.04 +FROM ubuntu:24.04 RUN apt-get update ... ```
diff --git a/content/manuals/build/release-notes.md b/content/manuals/build/release-notes.md index 0c995aaf89bc..aeae1f672679 100644 --- a/content/manuals/build/release-notes.md +++ b/content/manuals/build/release-notes.md @@ -10,13 +10,159 @@ toc_max: 2 This page contains information about the new features, improvements, and bug fixes in [Docker Buildx](https://github.com/docker/buildx).
+## 0.25.0 + +{{< release-date date="2025-06-17" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.25.0). + +### New + +- Bake now supports defining `extra-hosts`. [docker/buildx#3234](https://github.com/docker/buildx/pull/3234) + +### Enhancements + +- Add support for bearer token auth. [docker/buildx#3233](https://github.com/docker/buildx/pull/3233) +- Add custom exit codes for internal, resource, and canceled errors in commands. [docker/buildx#3214](https://github.com/docker/buildx/pull/3214) +- Show variable type when using `--list=variables` with Bake. [docker/buildx#3207](https://github.com/docker/buildx/pull/3207) +- Consider typed, value-less variables to have `null` value in Bake. [docker/buildx#3198](https://github.com/docker/buildx/pull/3198) +- Add support for multiple IPs in extra hosts configuration. [docker/buildx#3244](https://github.com/docker/buildx/pull/3244) +- Support for updated SLSA V1 provenance in `buildx history` commands. [docker/buildx#3245](https://github.com/docker/buildx/pull/3245) +- Add support for `RegistryToken` configuration in imagetools commands. [docker/buildx#3233](https://github.com/docker/buildx/pull/3233) + +### Bug fixes + +- Fix `keep-storage` flag deprecation notice for `prune` command. [docker/buildx#3216](https://github.com/docker/buildx/pull/3216) + +## 0.24.0 + +{{< release-date date="2025-05-21" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.24.0). + +### Enhancements + +- New `type` attribute added to the `variable` block in Bake to allow explicit typing of variables. [docker/buildx#3167](https://github.com/docker/buildx/pull/3167), [docker/buildx#3189](https://github.com/docker/buildx/pull/3189), [docker/buildx#3198](https://github.com/docker/buildx/pull/3198) +- New `--finalize` flag added to the `history export` command to finalize build traces before exporting. [docker/buildx#3152](https://github.com/docker/buildx/pull/3152) +- Compose compatibility has been updated to v2.6.3. [docker/buildx#3191](https://github.com/docker/buildx/pull/3191), [docker/buildx#3171](https://github.com/docker/buildx/pull/3171) + +### Bug fixes + +- Fix issue where some builds may leave behind temporary files after completion. [docker/buildx#3133](https://github.com/docker/buildx/pull/3133) +- Fix wrong image ID returned when building with Docker when containerd-snapshotter is enabled. [docker/buildx#3136](https://github.com/docker/buildx/pull/3136) +- Fix possible panic when using empty `call` definition with Bake. [docker/buildx#3168](https://github.com/docker/buildx/pull/3168) +- Fix possible malformed Dockerfile path with Bake on Windows. [docker/buildx#3141](https://github.com/docker/buildx/pull/3141) +- Fix current builder not being available in JSON output for `ls` command. [docker/buildx#3179](https://github.com/docker/buildx/pull/3179) +- Fix OTEL context not being propagated to Docker daemon. [docker/buildx#3146](https://github.com/docker/buildx/pull/3146) + +## 0.23.0 + +{{< release-date date="2025-04-15" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.23.0). + +### New + +- New `buildx history export` command allows exporting the build record into a bundle that can be imported to [Docker Desktop](/desktop/). 
[docker/buildx#3073](https://github.com/docker/buildx/pull/3073) + +### Enhancements + +- New `--local` and `--filter` flags allow filtering history records in `buildx history ls`. [docker/buildx#3091](https://github.com/docker/buildx/pull/3091) +- Compose compatibility has been updated to v2.6.0. [docker/buildx#3080](https://github.com/docker/buildx/pull/3080), [docker/buildx#3105](https://github.com/docker/buildx/pull/3105) +- Support CLI environment variables in standalone mode. [docker/buildx#3087](https://github.com/docker/buildx/pull/3087) + +### Bug fixes + +- Fix `--print` output for Bake producing output with unescaped variables that could cause build errors later. [docker/buildx#3097](https://github.com/docker/buildx/pull/3097) +- Fix `additional_contexts` field not working correctly when pointing to another service. [docker/buildx#3090](https://github.com/docker/buildx/pull/3090) +- Fix empty validation block crashing the Bake HCL parser. [docker/buildx#3101](https://github.com/docker/buildx/pull/3101) + +## 0.22.0 + +{{< release-date date="2025-03-18" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.22.0). + +### New + +- New command `buildx history import` lets you import build records into Docker Desktop for further debugging in the [Build UI](/desktop/use-desktop/builds/). This command requires [Docker Desktop](/desktop/) to be installed. [docker/buildx#3039](https://github.com/docker/buildx/pull/3039) + +### Enhancements + +- History records can now be opened by offset from the latest in `history inspect`, `history logs`, and `history open` commands (for example, `^1`). [docker/buildx#3049](https://github.com/docker/buildx/pull/3049), [docker/buildx#3055](https://github.com/docker/buildx/pull/3055) +- Bake now supports the `+=` operator to append when using `--set` for overrides. [docker/buildx#3031](https://github.com/docker/buildx/pull/3031) +- Docker container driver adds GPU devices to the container if available. [docker/buildx#3063](https://github.com/docker/buildx/pull/3063) +- Annotations can now be set when using overrides with Bake. [docker/buildx#2997](https://github.com/docker/buildx/pull/2997) +- NetBSD binaries are now included in the release. [docker/buildx#2901](https://github.com/docker/buildx/pull/2901) +- The `inspect` and `create` commands now return an error if a node fails to boot. [docker/buildx#3062](https://github.com/docker/buildx/pull/3062) + +### Bug fixes + +- Fix double pushing with Docker driver when the containerd image store is enabled. [docker/buildx#3023](https://github.com/docker/buildx/pull/3023) +- Fix multiple tags being pushed for `imagetools create` command. Now only the final manifest is pushed by tag. [docker/buildx#3024](https://github.com/docker/buildx/pull/3024) + +## 0.21.0 + +{{< release-date date="2025-02-19" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.21.0). + +### New + +- New command `buildx history trace` lets you inspect traces of a build in a Jaeger UI-based viewer and compare one trace with another. [docker/buildx#2904](https://github.com/docker/buildx/pull/2904) + +### Enhancements + +- The history inspection command `buildx history inspect` now supports custom formatting with the `--format` flag and JSON formatting for machine-readable output. [docker/buildx#2964](https://github.com/docker/buildx/pull/2964) +- Support for CDI device entitlement in build and bake.
[docker/buildx#2994](https://github.com/docker/buildx/pull/2994) +- Supported CDI devices are now shown in the builder inspection. [docker/buildx#2983](https://github.com/docker/buildx/pull/2983) +- When using [GitHub Cache backend `type=gha`](cache/backends/gha.md), the URL for version 2 of the API is now read from the environment and sent to BuildKit. The version 2 backend requires BuildKit v0.20.0 or later. [docker/buildx#2983](https://github.com/docker/buildx/pull/2983), [docker/buildx#3001](https://github.com/docker/buildx/pull/3001) + +### Bug fixes + +- Avoid unnecessary warnings and prompts when using `--progress=rawjson`. [docker/buildx#2957](https://github.com/docker/buildx/pull/2957) +- Fix regression with debug shell sometimes not working correctly on `--on=error`. [docker/buildx#2958](https://github.com/docker/buildx/pull/2958) +- Fix possible panic errors when using an unknown variable in the Bake definition. [docker/buildx#2960](https://github.com/docker/buildx/pull/2960) +- Fix invalid duplicate output in JSON formatting of the `buildx ls` command. [docker/buildx#2970](https://github.com/docker/buildx/pull/2970) +- Fix Bake handling of cache imports with a CSV string containing multiple registry references. [docker/buildx#2944](https://github.com/docker/buildx/pull/2944) +- Fix issue where an error from pulling the BuildKit image could be ignored. [docker/buildx#2988](https://github.com/docker/buildx/pull/2988) +- Fix race on pausing progress on debug shell. [docker/buildx#3003](https://github.com/docker/buildx/pull/3003) + +## 0.20.1 + +{{< release-date date="2025-01-23" >}} + +The full release notes for this release are available +[on GitHub](https://github.com/docker/buildx/releases/tag/v0.20.1). + +### Bug fixes + +- Fix `bake --print` output missing some attributes for attestations. [docker/buildx#2937](https://github.com/docker/buildx/pull/2937) +- Fix allowing comma-separated image reference strings for cache import and export values. [docker/buildx#2944](https://github.com/docker/buildx/pull/2944) + ## 0.20.0 {{< release-date date="2025-01-20" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.20.0). +> [!NOTE] +> +> This version of buildx enables filesystem entitlement checks for the `buildx bake` +> command by default. If your Bake definition needs to read or write files +> outside your current working directory, you need to allow access to these +> paths with `--allow fs=<path>`. On the terminal, you can also interactively +> approve these paths with the provided prompt. Optionally, you can disable +> these checks by setting `BUILDX_BAKE_ENTITLEMENTS_FS=0`. This validation +> produced a warning in Buildx v0.19.0+, but starting from the current release it +> produces an error. For more information, see the [reference documentation](/reference/cli/docker/buildx/bake.md#allow). + ### New - New `buildx history` command has been added that allows working with build records of completed and running builds. You can use these commands to list, inspect, remove your builds, replay the logs of already completed builds, and quickly open your builds in Docker Desktop Build UI for further debugging. This is an early version of this command and we expect to add more features in future releases.
[#2891](https://github.com/docker/buildx/pull/2891), [#2925](https://github.com/docker/buildx/pull/2925) @@ -46,7 +192,7 @@ The full release note for this release is available {{< release-date date="2024-11-27" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.19.1). ### Bug fixes @@ -61,7 +207,7 @@ The full release note for this release is available {{< release-date date="2024-11-27" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.19.0). ### New @@ -103,7 +249,7 @@ The full release note for this release is available {{< release-date date="2024-10-31" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.18.0). ### New @@ -138,7 +284,7 @@ The full release note for this release is available {{< release-date date="2024-09-13" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.17.1). ### Bug fixes @@ -157,7 +303,7 @@ The full release note for this release is available {{< release-date date="2024-09-10" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.17.0). ### New @@ -209,7 +355,7 @@ The full release note for this release is available {{< release-date date="2024-07-25" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.2). ### Bug fixes @@ -220,7 +366,7 @@ The full release note for this release is available {{< release-date date="2024-07-18" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.1). ### Bug fixes @@ -232,7 +378,7 @@ The full release note for this release is available {{< release-date date="2024-07-11" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.16.0). ### New @@ -266,7 +412,7 @@ The full release note for this release is available {{< release-date date="2024-06-18" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.15.1). ### Bug fixes @@ -278,7 +424,7 @@ The full release note for this release is available {{< release-date date="2024-06-11" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.15.0). ### New @@ -307,7 +453,7 @@ The full release note for this release is available {{< release-date date="2024-04-18" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.14.0). 
### Enhancements @@ -355,7 +501,7 @@ The full release note for this release is available {{< release-date date="2024-03-13" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.13.1). ### Bug fixes @@ -367,7 +513,7 @@ The full release note for this release is available {{< release-date date="2024-03-06" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.13.0). ### New @@ -409,7 +555,7 @@ The full release note for this release is available {{< release-date date="2024-01-12" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.12.1). ### Bug fixes and enhancements @@ -421,7 +567,7 @@ The full release note for this release is available {{< release-date date="2023-11-16" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.12.0). ### New @@ -514,7 +660,7 @@ The full release note for this release is available {{< release-date date="2023-07-18" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.2). ### Bug fixes and enhancements @@ -528,7 +674,7 @@ The full release note for this release is available {{< release-date date="2023-07-05" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.1). ### Bug fixes and enhancements @@ -546,7 +692,7 @@ The full release note for this release is available {{< release-date date="2023-06-13" >}} -The full release note for this release is available +The full release notes for this release are available [on GitHub](https://github.com/docker/buildx/releases/tag/v0.11.0). 
### New @@ -625,7 +771,7 @@ The full release note for this release is available {{< release-date date="2023-03-06" >}} -{{< include "buildx-v0.10-disclaimer.md" >}} +{{% include "buildx-v0.10-disclaimer.md" %}} ### Bug fixes and enhancements @@ -637,7 +783,7 @@ The full release note for this release is available {{< release-date date="2023-02-16" >}} -{{< include "buildx-v0.10-disclaimer.md" >}} +{{% include "buildx-v0.10-disclaimer.md" %}} ### Bug fixes and enhancements @@ -651,7 +797,7 @@ The full release note for this release is available {{< release-date date="2023-01-30" >}} -{{< include "buildx-v0.10-disclaimer.md" >}} +{{% include "buildx-v0.10-disclaimer.md" %}} ### Bug fixes and enhancements @@ -665,7 +811,7 @@ The full release note for this release is available {{< release-date date="2023-01-27" >}} -{{< include "buildx-v0.10-disclaimer.md" >}} +{{% include "buildx-v0.10-disclaimer.md" %}} ### Bug fixes and enhancements @@ -682,7 +828,7 @@ The full release note for this release is available {{< release-date date="2023-01-10" >}} -{{< include "buildx-v0.10-disclaimer.md" >}} +{{% include "buildx-v0.10-disclaimer.md" %}} ### New diff --git a/content/manuals/cloud/index.md b/content/manuals/cloud/index.md deleted file mode 100644 index 7f6517780766..000000000000 --- a/content/manuals/cloud/index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Cloud integrations -description: ACI and ECS integration information -keywords: compose, ACI, ECS, amazon, cloud integration -params: - sidebar: - group: Products -aliases: -- /engine/context/aci-integration/ -- /cloud/aci-integration/ -- /cloud/ecs-integration/ -- /cloud/ecs-compose-examples/ -- /cloud/ecs-compose-features/ -- /cloud/ecs-architecture/ -- /cloud/aci-compose-features/ -- /cloud/aci-container-features/ -- /engine/context/ecs-integration/ ---- - -Docker Compose's integration for Amazon's Elastic Container Service and Azure Container Instances has retired. The integration documentation is no longer available through the Docker Docs site. - -However, you can still access the relevant documentation in the [Compose CLI repository](https://github.com/docker/compose-cli/tree/main/docs). - diff --git a/content/manuals/compose/_index.md b/content/manuals/compose/_index.md index 71c95c12a21c..79d08ec6bff3 100644 --- a/content/manuals/compose/_index.md +++ b/content/manuals/compose/_index.md @@ -3,9 +3,7 @@ title: Docker Compose weight: 30 description: Learn how to use Docker Compose to define and run multi-container applications with this detailed introduction to the tool. -keywords: docker compose, docker-compose, docker compose command, docker compose files, - docker compose documentation, using docker compose, compose container, docker compose - service +keywords: docker compose, docker-compose, compose.yaml, docker compose command, multi-container applications, container orchestration, docker cli params: sidebar: group: Open source @@ -36,12 +34,16 @@ grid: Docker application. icon: polyline link: /reference/compose-file +- title: Use Compose Bridge + description: Transform your Compose configuration file into configuration files for different platforms, such as Kubernetes. + icon: move_down + link: /compose/bridge - title: Browse common FAQs description: Explore general FAQs and find out how to give feedback. 
icon: help link: /compose/faq -- title: Migrate to Compose V2 - description: Learn how to migrate from Compose V1 to V2 +- title: Migrate to Compose v2 + description: Learn how to migrate from Compose v1 to v2 icon: folder_delete link: /compose/releases/migrate/ aliases: @@ -55,15 +57,15 @@ aliases: Docker Compose is a tool for defining and running multi-container applications. It is the key to unlocking a streamlined and efficient development and deployment experience. -Compose simplifies the control of your entire application stack, making it easy to manage services, networks, and volumes in a single, comprehensible YAML configuration file. Then, with a single command, you create and start all the services +Compose simplifies the control of your entire application stack, making it easy to manage services, networks, and volumes in a single YAML configuration file. Then, with a single command, you create and start all the services from your configuration file. -Compose works in all environments; production, staging, development, testing, as +Compose works in all environments - production, staging, development, testing, as well as CI workflows. It also has commands for managing the whole lifecycle of your application: - * Start, stop, and rebuild services - * View the status of running services - * Stream the log output of running services - * Run a one-off command on a service + - Start, stop, and rebuild services + - View the status of running services + - Stream the log output of running services + - Run a one-off command on a service {{< grid >}} diff --git a/content/manuals/compose/bridge/_index.md b/content/manuals/compose/bridge/_index.md index c17ec26aa738..ba857555e46d 100644 --- a/content/manuals/compose/bridge/_index.md +++ b/content/manuals/compose/bridge/_index.md @@ -1,6 +1,6 @@ --- -description: Understand what Compose Bridge is and how it can be useful -keywords: compose, orchestration, kubernetes, bridge +description: Learn how Compose Bridge transforms Docker Compose files into Kubernetes manifests for seamless platform transitions +keywords: docker compose bridge, compose to kubernetes, docker compose kubernetes integration, docker compose kustomize, compose bridge docker desktop title: Overview of Compose Bridge linkTitle: Compose Bridge weight: 50 @@ -8,7 +8,7 @@ weight: 50 {{< summary-bar feature_name="Compose bridge" >}} -Compose Bridge lets you transform your Compose configuration file into configuration files for different platforms, primarily focusing on Kubernetes. The default transformation generates Kubernetes manifests and a Kustomize overlay which are designed for deployment on Docker Desktop with Kubernetes enabled. +Compose Bridge converts your Docker Compose configuration into platform-specific formats—primarily Kubernetes manifests. The default transformation generates Kubernetes manifests and a Kustomize overlay which are designed for deployment on Docker Desktop with Kubernetes enabled. It's a flexible tool that lets you either take advantage of the [default transformation](usage.md) or [create a custom transformation](customize.md) to suit specific project needs and requirements. @@ -24,21 +24,7 @@ Compose Bridge provides its own transformation for Kubernetes using Go templates For more detailed information on how these transformations work and how you can customize them for your projects, see [Customize](customize.md). -## Setup - -To get started with Compose Bridge, you need to: - -1. Download and install Docker Desktop version 4.33 and later. -2. 
Sign in to your Docker account. -3. Navigate to the **Features in development** tab in **Settings**. -4. From the **Experimental features** tab, select **Enable Compose Bridge**. - -## Feedback - -To give feedback, report bugs, or receive support, email `desktop-preview@docker.com`. There is also a dedicated Slack channel. To join, simply send an email to the provided address. - ## What's next? - [Use Compose Bridge](usage.md) -- [Explore how you can customize Compose Bridge](customize.md) -- [Explore the advanced integration](advanced-integration.md) +- [Explore how you can customize Compose Bridge](customize.md) \ No newline at end of file diff --git a/content/manuals/compose/bridge/advanced-integration.md b/content/manuals/compose/bridge/advanced-integration.md deleted file mode 100644 index db9e71837542..000000000000 --- a/content/manuals/compose/bridge/advanced-integration.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Advanced integration -linkTitle: Advanced -weight: 30 -description: Learn about how Compose Bridge can function a kubectl plugin -keywords: kubernetes, compose, compose bridge, plugin, advanced ---- - -{{< summary-bar feature_name="Compose bridge" >}} - -Compose Bridge can also function as a `kubectl` plugin, allowing you to integrate its capabilities directly into your Kubernetes command-line operations. This integration simplifies the process of converting and deploying applications from Docker Compose to Kubernetes. - -## Use `compose-bridge` as a `kubectl` plugin - -To use the `compose-bridge` binary as a `kubectl` plugin, you need to make sure that the binary is available in your PATH and the name of the binary is prefixed with `kubectl-`. - -1. Rename or copy the `compose-bridge` binary to `kubectl-compose_bridge`: - - ```console - $ mv /path/to/compose-bridge /usr/local/bin/kubectl-compose_bridge - ``` - -2. Ensure that the binary is executable: - - ```console - $ chmod +x /usr/local/bin/kubectl-compose_bridge - ``` - -3. Verify that the plugin is recognized by `kubectl`: - - ```console - $ kubectl plugin list - ``` - - In the output, you should see `kubectl-compose_bridge`. - -4. Now you can use `compose-bridge` as a `kubectl` plugin: - - ```console - $ kubectl compose-bridge [command] - ``` - -Replace `[command]` with any `compose-bridge` command you want to use. diff --git a/content/manuals/compose/bridge/customize.md b/content/manuals/compose/bridge/customize.md index 62bdcb880ddb..36e6100805e7 100644 --- a/content/manuals/compose/bridge/customize.md +++ b/content/manuals/compose/bridge/customize.md @@ -2,13 +2,14 @@ title: Customize Compose Bridge linkTitle: Customize weight: 20 -description: Learn about the Compose Bridge templates syntax -keywords: compose, bridge, templates +description: Learn how to customize Compose Bridge transformations using Go templates and Compose extensions +keywords: docker compose bridge, customize compose bridge, compose bridge templates, compose to kubernetes, compose bridge transformation, go templates docker + --- {{< summary-bar feature_name="Compose bridge" >}} -This page explains how Compose Bridge utilizes templating to efficiently translate Docker Compose files into Kubernetes manifests. It also explain how you can customize these templates for your specific requirements and needs, or how you can build your own transformation. +This page explains how Compose Bridge utilizes templating to efficiently translate Docker Compose files into Kubernetes manifests. 
It also explains how you can customize these templates for your specific requirements and needs, or how you can build your own transformation. ## How it works @@ -16,11 +17,11 @@ Compose bridge uses transformations to let you convert a Compose model into another A transformation is packaged as a Docker image that receives the fully-resolved Compose model as `/in/compose.yaml` and can produce any target format file under `/out`. -Compose Bridge provides its transformation for Kubernetes using Go templates, so that it is easy to extend for customization by just replacing or appending your own templates. +Compose Bridge includes a default Kubernetes transformation using Go templates, which you can customize by replacing or extending templates. ### Syntax -Compose Bridge make use of templates to transform a Compose configuration file into Kubernetes manifests. Templates are plain text files that use the [Go templating syntax](https://pkg.go.dev/text/template). This enables the insertion of logic and data, making the templates dynamic and adaptable according to the Compose model. +Compose Bridge makes use of templates to transform a Compose configuration file into Kubernetes manifests. Templates are plain text files that use the [Go templating syntax](https://pkg.go.dev/text/template). This enables the insertion of logic and data, making the templates dynamic and adaptable according to the Compose model. When a template is executed, it must produce a YAML file which is the standard format for Kubernetes manifests. Multiple files can be generated as long as they are separated by `---` @@ -44,7 +45,7 @@ key: value ### Input -The input Compose model is the canonical YAML model you can get by running `docker compose config`. Within the templates, data from the `compose.yaml` is accessed using dot notation, allowing you to navigate through nested data structures. For example, to access the deployment mode of a service, you would use `service.deploy.mode`: +You can generate the input model by running `docker compose config`. This canonical YAML output serves as the input for Compose Bridge transformations. Within the templates, data from the `compose.yaml` is accessed using dot notation, allowing you to navigate through nested data structures. For example, to access the deployment mode of a service, you would use `service.deploy.mode`: ```yaml # iterate over a yaml sequence @@ -86,12 +87,12 @@ In the following example, the template checks if a healthcheck interval is specified As Kubernetes is a versatile platform, there are many ways to map Compose concepts into Kubernetes resource definitions. Compose Bridge lets you customize the transformation to match your own infrastructure -decisions and preferences, with various level of flexibility and effort. +decisions and preferences, with varying levels of flexibility and effort. ### Modify the default templates You can extract templates used by the default transformation `docker/compose-bridge-kubernetes`, -by running `compose-bridge transformations create --from docker/compose-bridge-kubernetes my-template` +by running `docker compose bridge transformations create --from docker/compose-bridge-kubernetes my-template` and adjusting the templates to match your needs. The templates are extracted into a directory named after your template name, in this case `my-template`. @@ -106,7 +107,7 @@ $ docker build --tag mycompany/transform --push .
You can then use your transformation as a replacement: ```console -$ compose-bridge convert --transformations mycompany/transform +$ docker compose bridge convert --transformations mycompany/transform ``` ### Add your own templates @@ -133,8 +134,8 @@ metadata: spec: rules: {{ range $name, $service := .services }} -{{ if $service.x-virtual-host }} - - host: ${{ $service.x-virtual-host }} +{{ range index $service "x-virtual-host" }} + - host: ${{ . }} http: paths: - path: "/" @@ -152,7 +153,7 @@ when transforming Compose models into Kubernetes in addition to other transformations: ```console -$ compose-bridge convert \ +$ docker compose bridge convert \ --transformation docker/compose-bridge-kubernetes \ --transformation mycompany/transform ``` @@ -184,7 +185,3 @@ CMD ["/usr/bin/kompose", "convert", "-f", "/in/compose.yaml", "--out", "/out"] This Dockerfile bundles Kompose and defines the command to run this tool according to the Compose Bridge transformation contract. - -## What's next? - -- [Explore the advanced integration](advanced-integration.md) diff --git a/content/manuals/compose/bridge/usage.md b/content/manuals/compose/bridge/usage.md index 091457fbeefb..d2b6b2f620f3 100644 --- a/content/manuals/compose/bridge/usage.md +++ b/content/manuals/compose/bridge/usage.md @@ -2,13 +2,13 @@ title: Use the default Compose Bridge transformation linkTitle: Usage weight: 10 -description: Learn about and use the Compose Bridge default transformation -keywords: compose, bridge, kubernetes +description: Learn how to use the default Compose Bridge transformation to convert Compose files into Kubernetes manifests +keywords: docker compose bridge, compose kubernetes transform, kubernetes from compose, compose bridge convert, compose.yaml to kubernetes --- {{< summary-bar feature_name="Compose bridge" >}} -Compose Bridge supplies an out-of-the box transformation for your Compose configuration file. Based on an arbitrary `compose.yaml` file, Compose Bridge produces: +Compose Bridge supplies an out-of-the-box transformation for your Compose configuration file. Based on an arbitrary `compose.yaml` file, Compose Bridge produces: - A [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) so all your resources are isolated and don't conflict with resources from other deployments. - A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) with an entry for each and every [config](/reference/compose-file/configs.md) resource in your Compose application. @@ -29,14 +29,15 @@ It also supplies a Kustomize overlay dedicated to Docker Desktop with: To use the default transformation run the following command: ```console -$ compose-bridge convert +$ docker compose bridge convert ``` Compose looks for a `compose.yaml` file inside the current directory and then converts it. -The following output is displayed +When successful, Compose Bridge generates Kubernetes manifests and logs output similar to the following: + ```console -$ compose-bridge convert -f compose.yaml +$ docker compose bridge convert -f compose.yaml Kubernetes resource api-deployment.yaml created Kubernetes resource db-deployment.yaml created Kubernetes resource web-deployment.yaml created @@ -62,29 +63,28 @@ These files are then stored within your project in the `/out` folder. The Kubernetes manifests can then be used to run the application on Kubernetes using the standard deployment command `kubectl apply -k out/overlays/desktop/`. 
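For instance, a minimal deploy-and-verify sequence might look like the following sketch. The namespace name is an assumption for illustration; Compose Bridge derives the actual namespace from your Compose project:

```console
$ kubectl apply -k out/overlays/desktop/
$ kubectl get pods -n <project-namespace>
```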
-> [!NOTE] +> [!IMPORTANT] > > Make sure you have enabled Kubernetes in Docker Desktop before you deploy your Compose Bridge transformations. If you want to convert a `compose.yaml` file that is located in another directory, you can run: ```console -$ compose-bridge convert -f <path-to-file>/compose.yaml +$ docker compose bridge convert -f <path-to-file>/compose.yaml ``` To see all available flags, run: ```console -$ compose-bridge convert --help +$ docker compose bridge convert --help ``` > [!TIP] > -> You can now convert and deploy your Compose project to a Kubernetes cluster from the Compose file viewer. +> You can convert and deploy your Compose project to a Kubernetes cluster from the Compose file viewer. > > Make sure you are signed in to your Docker account, navigate to your container in the **Containers** view, and in the top-right corner select **View configurations** and then **Convert and Deploy to Kubernetes**. ## What's next? -- [Explore how you can customize Compose Bridge](customize.md) -- [Explore the advanced integration](advanced-integration.md) +- [Explore how you can customize Compose Bridge](customize.md) \ No newline at end of file
diff --git a/content/manuals/compose/gettingstarted.md b/content/manuals/compose/gettingstarted.md index 5475e9e1139e..dcbfd3fd5c1b 100644 --- a/content/manuals/compose/gettingstarted.md +++ b/content/manuals/compose/gettingstarted.md @@ -1,5 +1,5 @@ --- -description: Check out this tutorial on how to use Docker Compose from defining application +description: Follow this hands-on tutorial to learn how to use Docker Compose from defining application dependencies to experimenting with commands. keywords: docker compose example, docker compose tutorial, how to use docker compose, running docker compose, how to run docker compose, docker compose build image, docker @@ -16,13 +16,13 @@ Using the Flask framework, the application features a hit counter in Redis, prov The concepts demonstrated here should be understandable even if you're not familiar with Python. -This is a non-normative example that just highlights the key things you can do with Compose. +This is a non-normative example that demonstrates core Compose functionality. ## Prerequisites Make sure you have: -- Installed the latest version of Docker Compose +- [Installed the latest version of Docker Compose](/manuals/compose/install/_index.md) - A basic understanding of Docker concepts and how Docker works ## Step 1: Set up
diff --git a/content/manuals/compose/how-tos/dependent-images.md b/content/manuals/compose/how-tos/dependent-images.md new file mode 100644 index 000000000000..ba9e44ff31e2 --- /dev/null +++ b/content/manuals/compose/how-tos/dependent-images.md @@ -0,0 +1,169 @@ +--- +description: Build images for services with a shared definition +keywords: compose, build +title: Build dependent images +weight: 50 +--- + +{{< summary-bar feature_name="Compose dependent images" >}} + +To reduce push and pull times and image size, a common practice for Compose applications is to have services +share base layers as much as possible. You typically select the same operating system base image for +all services. But you can also go one step further by sharing image layers when your images share the same +system packages. The challenge is then to avoid repeating the exact same Dockerfile instructions +in all services. + +For illustration, this page assumes you want all your services to be built with an `alpine` base +image and install the system package `openssl`.
+ +## Multi-stage Dockerfile + +The recommended approach is to group the shared declaration in a single Dockerfile, and use multi-stage features +so that service images are built from this shared declaration. + +Dockerfile: + +```dockerfile +FROM alpine as base +RUN /bin/sh -c apk add --update --no-cache openssl + +FROM base as service_a +# build service a +... + +FROM base as service_b +# build service b +... +``` + +Compose file: + +```yaml +services: + a: + build: + target: service_a + b: + build: + target: service_b +``` + +## Use another service's image as the base image + +A popular pattern is to reuse a service image as a base image in another service. +As Compose does not parse the Dockerfile, it can't automatically detect this dependency +between services to correctly order the build execution. + +a.Dockerfile: + +```dockerfile +FROM alpine +RUN /bin/sh -c apk add --update --no-cache openssl +``` + +b.Dockerfile: + +```dockerfile +FROM service_a +# build service b +``` + +Compose file: + +```yaml +services: + a: + image: service_a + build: + dockerfile: a.Dockerfile + b: + image: service_b + build: + dockerfile: b.Dockerfile +``` + +Legacy Docker Compose v1 used to build images sequentially, which made this pattern usable +out of the box. Compose v2 uses BuildKit to optimize builds and build images in parallel +and requires an explicit declaration. + +The recommended approach is to declare the dependent base image as an additional build context: + +Compose file: + +```yaml +services: + a: + image: service_a + build: + dockerfile: a.Dockerfile + b: + image: service_b + build: + dockerfile: b.Dockerfile + additional_contexts: + # `FROM service_a` will be resolved as a dependency on service "a" which has to be built first + service_a: "service:a" +``` + +With the `additional_contexts` attribute, you can refer to an image built by another service without needing to explicitly name it: + +b.Dockerfile: + +```dockerfile +FROM base_image +# `base_image` doesn't resolve to an actual image. This is used to point to a named additional context + +# build service b +``` + +Compose file: + +```yaml +services: + a: + build: + dockerfile: a.Dockerfile + # built image will be tagged <project-name>_a + b: + build: + dockerfile: b.Dockerfile + additional_contexts: + # `FROM base_image` will be resolved as a dependency on service "a" which has to be built first + base_image: "service:a" +``` + +## Build with Bake + +Using [Bake](/manuals/build/bake/_index.md) lets you pass the complete build definition for all services +and orchestrate build execution in the most efficient way. + +To enable this feature, run Compose with the `COMPOSE_BAKE=true` variable set in your environment. + +```console +$ COMPOSE_BAKE=true docker compose build +[+] Building 0.0s (0/1) + => [internal] load local bake definitions 0.0s +... +[+] Building 2/2 manifest list sha256:4bd2e88a262a02ddef525c381a5bdb08c83 0.0s + ✔ service_b Built 0.7s + ✔ service_a Built +``` + +Bake can also be selected as the default builder by editing your `$HOME/.docker/config.json` config file: +```json +{ + ... + "plugins": { + "compose": { + "build": "bake" + } + } + ...
+} +``` + +## Additional resources + +- [Docker Compose build reference](/reference/cli/docker/compose/build.md) +- [Learn about multi-stage Dockerfiles](/manuals/build/building/multi-stage.md) diff --git a/content/manuals/compose/how-tos/environment-variables/_index.md b/content/manuals/compose/how-tos/environment-variables/_index.md index a2ddb86929a7..0775edc2665d 100644 --- a/content/manuals/compose/how-tos/environment-variables/_index.md +++ b/content/manuals/compose/how-tos/environment-variables/_index.md @@ -2,14 +2,13 @@ title: Environment variables in Compose linkTitle: Use environment variables weight: 40 -description: Explainer on the ways to set, use and manage environment variables in - Compose +description: Explains how to set, use, and manage environment variables in Docker Compose. keywords: compose, orchestration, environment, env file aliases: - /compose/environment-variables/ --- -By leveraging environment variables and interpolation in Docker Compose, you can create versatile and reusable configurations, making your Dockerized applications easier to manage and deploy across different environments. +Environment variables and interpolation in Docker Compose help you create reusable, flexible configurations. This makes Dockerized applications easier to manage and deploy across environments. > [!TIP] > diff --git a/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md b/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md index 9123504ccc9d..8197d8f18f7a 100644 --- a/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md +++ b/content/manuals/compose/how-tos/environment-variables/envvars-precedence.md @@ -12,7 +12,7 @@ aliases: When the same environment variable is set in multiple sources, Docker Compose follows a precedence rule to determine the value for that variable in your container's environment. -This page contains information on the level of precedence each method of setting environmental variables takes. +This page explains how Docker Compose determines the final value of an environment variable when it's defined in multiple locations. The order of precedence (highest to lowest) is as follows: 1. Set using [`docker compose run -e` in the CLI](set-environment-variables.md#set-environment-variables-with-docker-compose-run---env). @@ -30,7 +30,7 @@ In the following example, a different value for the same environment variable in $ cat ./webapp.env NODE_ENV=test -$ cat compose.yml +$ cat compose.yaml services: webapp: image: 'webapp' @@ -59,48 +59,53 @@ The columns `Host OS environment` and `.env` file is listed only for illustratio Each row represents a combination of contexts where `VALUE` is set, substituted, or both. The **Result** column indicates the final value for `VALUE` in each scenario. 
-| # | `docker compose run` | `environment` attribute | `env_file` attribute | Image `ENV` | `Host OS` environment | `.env` file | | Result | -|:--:|:----------------:|:-------------------------------:|:----------------------:|:------------:|:-----------------------:|:-----------------:|:---:|:----------:| -| 1 | - | - | - | - | `VALUE=1.4` | `VALUE=1.3` || - | -| 2 | - | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | - ||**`VALUE=1.6`** | -| 3 | - | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | - ||**`VALUE=1.7`** | -| 4 | - | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.5`** | -| 5 |`--env VALUE=1.8` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 6 |`--env VALUE` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 7 |`--env VALUE` | - | - | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 8 | - | - | `VALUE` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 9 | - | - | `VALUE` | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 10 | - | `VALUE` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 11 | - | `VALUE` | - | `VALUE=1.5` | - | `VALUE=1.3` ||**`VALUE=1.3`** | -| 12 |`--env VALUE` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.4`** | -| 13 |`--env VALUE=1.8` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 14 |`--env VALUE=1.8` | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | -| 15 |`--env VALUE=1.8` | `VALUE=1.7` | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` ||**`VALUE=1.8`** | - -### Result explanation +| # | `docker compose run` | `environment` attribute | `env_file` attribute | Image `ENV` | `Host OS` environment | `.env` file | Result | +|:--:|:----------------:|:-------------------------------:|:----------------------:|:------------:|:-----------------------:|:-----------------:|:----------:| +| 1 | - | - | - | - | `VALUE=1.4` | `VALUE=1.3` | - | +| 2 | - | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | - |**`VALUE=1.6`** | +| 3 | - | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | - |**`VALUE=1.7`** | +| 4 | - | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.5`** | +| 5 |`--env VALUE=1.8` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 6 |`--env VALUE` | - | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 7 |`--env VALUE` | - | - | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 8 | - | - | `VALUE` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 9 | - | - | `VALUE` | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 10 | - | `VALUE` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 11 | - | `VALUE` | - | `VALUE=1.5` | - | `VALUE=1.3` |**`VALUE=1.3`** | +| 12 |`--env VALUE` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.4`** | +| 13 |`--env VALUE=1.8` | `VALUE=1.7` | - | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 14 |`--env VALUE=1.8` | - | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | +| 15 |`--env VALUE=1.8` | `VALUE=1.7` | `VALUE=1.6` | `VALUE=1.5` | `VALUE=1.4` | `VALUE=1.3` |**`VALUE=1.8`** | + +### Understanding precedence results Result 1: The local environment takes precedence, but the Compose file is not set to replicate this inside the container, so no such variable is set. 
Result 2: The `env_file` attribute in the Compose file defines an explicit value for `VALUE` so the container environment is set accordingly. -Result 3: The `environment` attribute in the Compose file defines an explicit value for `VALUE`, so the container environment is set accordingly/ +Result 3: The `environment` attribute in the Compose file defines an explicit value for `VALUE`, so the container environment is set accordingly. Result 4: The image's `ENV` directive declares the variable `VALUE`, and since the Compose file is not set to override this value, this variable is defined by the image. -Result 5: The `docker compose run` command has the `--env` flag set which an explicit value, and overrides the value set by the image. +Result 5: The `docker compose run` command has the `--env` flag set with an explicit value, and overrides the value set by the image. Result 6: The `docker compose run` command has the `--env` flag set to replicate the value from the environment. Host OS value takes precedence and is replicated into the container's environment. -Result 7: The `docker compose run` command has the `--env` flag set to replicate the value from the environment. Value from `.env` file is the selected to define the container's environment. +Result 7: The `docker compose run` command has the `--env` flag set to replicate the value from the environment. Value from `.env` file is selected to define the container's environment. Result 8: The `env_file` attribute in the Compose file is set to replicate `VALUE` from the local environment. Host OS value takes precedence and is replicated into the container's environment. -Result 9: The `env_file` attribute in the Compose file is set to replicate `VALUE` from the local environment. Value from `.env` file is the selected to define the container's environment. +Result 9: The `env_file` attribute in the Compose file is set to replicate `VALUE` from the local environment. Value from `.env` file is selected to define the container's environment. Result 10: The `environment` attribute in the Compose file is set to replicate `VALUE` from the local environment. Host OS value takes precedence and is replicated into the container's environment. -Result 11: The `environment` attribute in the Compose file is set to replicate `VALUE` from the local environment. Value from `.env` file is the selected to define the container's environment. +Result 11: The `environment` attribute in the Compose file is set to replicate `VALUE` from the local environment. Value from `.env` file is selected to define the container's environment. Result 12: The `--env` flag has higher precedence than the `environment` and `env_file` attributes and is set to replicate `VALUE` from the local environment. Host OS value takes precedence and is replicated into the container's environment. Results 13 to 15: The `--env` flag has higher precedence than the `environment` and `env_file` attributes and so sets the value.
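As a quick sanity check of rule 1, the following sketch reuses the `webapp` service and the `VALUE` variable from the example above, assuming the image ships the `env` utility; the final line is what the precedence table predicts, not captured output:

```console
$ cat .env
VALUE=1.3
$ docker compose run -e VALUE=1.8 webapp env | grep '^VALUE='
VALUE=1.8
```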
+ +## Next steps + +- [Set environment variables in Compose](set-environment-variables.md) +- [Use variable interpolation in Compose files](variable-interpolation.md) diff --git a/content/manuals/compose/how-tos/environment-variables/envvars.md b/content/manuals/compose/how-tos/environment-variables/envvars.md index 54a2a5e446a0..f17aa2b9f2cb 100644 --- a/content/manuals/compose/how-tos/environment-variables/envvars.md +++ b/content/manuals/compose/how-tos/environment-variables/envvars.md @@ -1,7 +1,7 @@ --- description: Compose pre-defined environment variables -keywords: fig, composition, compose, docker, orchestration, cli, reference -title: Set or change pre-defined environment variables in Docker Compose +keywords: fig, composition, compose, docker, orchestration, cli, reference, compose environment configuration, docker env variables +title: Configure pre-defined environment variables in Docker Compose linkTitle: Pre-defined environment variables weight: 30 aliases: @@ -9,9 +9,9 @@ aliases: - /compose/environment-variables/envvars/ --- -Compose already comes with pre-defined environment variables. It also inherits common Docker CLI environment variables, such as `DOCKER_HOST` and `DOCKER_CONTEXT`. See [Docker CLI environment variable reference](/reference/cli/docker/#environment-variables) for details. +Docker Compose includes several pre-defined environment variables. It also inherits common Docker CLI environment variables, such as `DOCKER_HOST` and `DOCKER_CONTEXT`. See [Docker CLI environment variable reference](/reference/cli/docker/#environment-variables) for details. -This page contains information on how you can set or change the following pre-defined environment variables if you need to: +This page explains how to set or change the following pre-defined environment variables: - `COMPOSE_PROJECT_NAME` - `COMPOSE_FILE` @@ -26,19 +26,23 @@ This page contains information on how you can set or change the following pre-de - `COMPOSE_ENV_FILES` - `COMPOSE_MENU` - `COMPOSE_EXPERIMENTAL` +- `COMPOSE_PROGRESS` ## Methods to override -You can set or change the pre-defined environment variables: -- With an [`.env` file located in your working directory](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) -- From the command line -- From your [shell](variable-interpolation.md#substitute-from-the-shell) +| Method | Description | +| ----------- | -------------------------------------------- | +| [`.env` file](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) | Located in the working directory. | +| [Shell](variable-interpolation.md#substitute-from-the-shell) | Defined in the host operating system shell. | +| CLI | Passed with `--env` or `-e` flag at runtime. | When changing or setting any environment variables, be aware of [Environment variable precedence](envvars-precedence.md). -## Configure +## Configuration details -### COMPOSE\_PROJECT\_NAME +### Project and file configuration + +#### COMPOSE\_PROJECT\_NAME Sets the project name. This value is prepended along with the service name to the container's name on startup. @@ -50,7 +54,7 @@ Compose can set the project name in different ways. The level of precedence (fro 1. The `-p` command line flag 2. `COMPOSE_PROJECT_NAME` -3. The top level `name:` variable from the config file (or the last `name:` from +3. The top-level `name:` variable from the config file (or the last `name:` from a series of config files specified using `-f`) 4. 
The `basename` of the project directory containing the config file (or
containing the first config file specified using `-f`)
@@ -63,7 +67,7 @@ constraint, you must use one of the other mechanisms.

See also the [command-line options overview](/reference/cli/docker/compose/_index.md#command-options-overview-and-help) and [using `-p` to specify a project name](/reference/cli/docker/compose/_index.md#use--p-to-specify-a-project-name).

-### COMPOSE\_FILE
+#### COMPOSE\_FILE

Specifies the path to a Compose file. Specifying multiple Compose files is supported.
@@ -74,32 +78,54 @@ Specifies the path to a Compose file. Specifying multiple Compose files is suppo

For example:

```console
- COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml
+ COMPOSE_FILE=compose.yaml:compose.prod.yaml
```

The path separator can also be customized using [`COMPOSE_PATH_SEPARATOR`](#compose_path_separator).

-See also the [command-line options overview](/reference/cli/docker/compose/_index.md#command-options-overview-and-help) and [using `-f` to specify name and path of one or more Compose files](/reference/cli/docker/compose/_index.md#use--f-to-specify-name-and-path-of-one-or-more-compose-files).
+See also the [command-line options overview](/reference/cli/docker/compose/_index.md#command-options-overview-and-help) and [using `-f` to specify name and path of one or more Compose files](/reference/cli/docker/compose/_index.md#use--f-to-specify-the-name-and-path-of-one-or-more-compose-files).

-### COMPOSE\_PROFILES
+#### COMPOSE\_PROFILES

Specifies one or more profiles to be enabled when `docker compose up` is run.
Services with matching profiles are started as well as any services for which no profile has been defined.

-For example, calling `docker compose up`with `COMPOSE_PROFILES=frontend` selects services with the
+For example, calling `docker compose up` with `COMPOSE_PROFILES=frontend` selects services with the
`frontend` profile as well as any services without a profile specified.

If specifying multiple profiles, use a comma as a separator.
-This following example enables all services matching both the `frontend` and `debug` profiles and services without a profile.
+The following example enables all services matching both the `frontend` and `debug` profiles and services without a profile.

```console
COMPOSE_PROFILES=frontend,debug
```

-See also [Using profiles with Compose](../profiles.md) and the [`--profile` command-line option](/reference/cli/docker/compose/_index.md#use---profile-to-specify-one-or-more-active-profiles).
+See also [Using profiles with Compose](../profiles.md) and the [`--profile` command-line option](/reference/cli/docker/compose/_index.md#use-profiles-to-enable-optional-services).
+
+#### COMPOSE\_PATH\_SEPARATOR
+
+Specifies a different path separator for items listed in `COMPOSE_FILE`.
+
+- Defaults to:
+  - On macOS and Linux to `:`
+  - On Windows to `;`
+
+#### COMPOSE\_ENV\_FILES

-### COMPOSE\_CONVERT\_WINDOWS\_PATHS
+Specifies which environment files Compose should use if `--env-file` isn't used.
+
+When using multiple environment files, use a comma as a separator. For example:
+
+```console
+COMPOSE_ENV_FILES=.env.envfile1, .env.envfile2
+```
+
+If `COMPOSE_ENV_FILES` is not set, and you don't provide `--env-file` in the CLI, Docker Compose uses the default behavior, which is to look for an `.env` file in the project directory.
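+
+For example, assuming the files `.env.dev` and `.env.secrets` exist in the project directory (the file names are illustrative), the following command makes Compose load both without passing `--env-file`:
+
+```console
+$ COMPOSE_ENV_FILES=.env.dev,.env.secrets docker compose config
+```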
+ +### Environment handling and container lifecycle + +#### COMPOSE\_CONVERT\_WINDOWS\_PATHS When enabled, Compose performs path conversion from Windows-style to Unix-style in volume definitions. @@ -108,15 +134,7 @@ When enabled, Compose performs path conversion from Windows-style to Unix-style - `false` or `0`, to disable - Defaults to: `0` -### COMPOSE\_PATH\_SEPARATOR - -Specifies a different path separator for items listed in `COMPOSE_FILE`. - -- Defaults to: - - On macOS and Linux to `:` - - On Windows to`;` - -### COMPOSE\_IGNORE\_ORPHANS +#### COMPOSE\_IGNORE\_ORPHANS When enabled, Compose doesn't try to detect orphaned containers for the project. @@ -125,7 +143,7 @@ When enabled, Compose doesn't try to detect orphaned containers for the project. - `false` or `0`, to disable - Defaults to: `0` -### COMPOSE\_REMOVE\_ORPHANS +#### COMPOSE\_REMOVE\_ORPHANS When enabled, Compose automatically removes orphaned containers when updating a service or stack. Orphaned containers are those that were created by a previous configuration but are no longer defined in the current `compose.yaml` file. @@ -134,11 +152,13 @@ When enabled, Compose automatically removes orphaned containers when updating a - `false` or `0`, to disable automatic removal. Compose displays a warning about orphaned containers instead. - Defaults to: `0` -### COMPOSE\_PARALLEL\_LIMIT +#### COMPOSE\_PARALLEL\_LIMIT Specifies the maximum level of parallelism for concurrent engine calls. -### COMPOSE\_ANSI +### Output + +#### COMPOSE\_ANSI Specifies when to print ANSI control characters. @@ -148,7 +168,7 @@ Specifies when to print ANSI control characters. - `always` or `0`, use TTY mode - Defaults to: `auto` -### COMPOSE\_STATUS\_STDOUT +#### COMPOSE\_STATUS\_STDOUT When enabled, Compose writes its internal status and progress messages to `stdout` instead of `stderr`. The default value is false to clearly separate the output streams between Compose messages and your container's logs. @@ -158,34 +178,33 @@ The default value is false to clearly separate the output streams between Compos - `false` or `0`, to disable - Defaults to: `0` -### COMPOSE\_ENV\_FILES +#### COMPOSE\_PROGRESS -Lets you specify which environment files Compose should use if `--env-file` isn't used. +{{< summary-bar feature_name="Compose progress" >}} -When using multiple environment files, use a comma as a separator. For example: +Defines the type of progress output, if `--progress` isn't used. -```console -COMPOSE_ENV_FILES=.env.envfile1, .env.envfile2 -``` +Supported values are `auto`, `tty`, `plain`, `json`, and `quiet`. +Default is `auto`. -If `COMPOSE_ENV_FILES` is not set, and you don't provide `--env-file` in the CLI, Docker Compose uses the default behavior, which is to look for an `.env` file in the project directory. +### User experience -### COMPOSE\_MENU +#### COMPOSE\_MENU -{{< introduced compose 2.26.0 "/manuals/compose/releases/release-notes.md#2260" >}} +{{< summary-bar feature_name="Compose menu" >}} When enabled, Compose displays a navigation menu where you can choose to open the Compose stack in Docker Desktop, switch on [`watch` mode](../file-watch.md), or use [Docker Debug](/reference/cli/docker/debug.md). 
- Supported values: - `true` or `1`, to enable - `false` or `0`, to disable -- Defaults to: `1` if you obtained Docker Compose through Docker Desktop, otherwise default is `0` +- Defaults to: `1` if you obtained Docker Compose through Docker Desktop, otherwise the default is `0` -### COMPOSE\_EXPERIMENTAL +#### COMPOSE\_EXPERIMENTAL -{{< introduced compose 2.26.0 "/manuals/compose/releases/release-notes.md#2260" >}} +{{< summary-bar feature_name="Compose experimental" >}} -This is an opt-out variable. When turned off it deactivates the experimental features such as the navigation menu or [Synchronized file shares](/manuals/desktop/features/synchronized-file-sharing.md). +This is an opt-out variable. When turned off it deactivates the experimental features. - Supported values: - `true` or `1`, to enable @@ -206,3 +225,4 @@ For more information, see [Migrate to Compose V2](/manuals/compose/releases/migr - `COMPOSE_INTERACTIVE_NO_CLI` - `COMPOSE_DOCKER_CLI_BUILD` Use `DOCKER_BUILDKIT` to select between BuildKit and the classic builder. If `DOCKER_BUILDKIT=0` then `docker compose build` uses the classic builder to build images. + diff --git a/content/manuals/compose/how-tos/environment-variables/set-environment-variables.md b/content/manuals/compose/how-tos/environment-variables/set-environment-variables.md index 929a7c3eec3e..2f7c925b4c1d 100644 --- a/content/manuals/compose/how-tos/environment-variables/set-environment-variables.md +++ b/content/manuals/compose/how-tos/environment-variables/set-environment-variables.md @@ -20,7 +20,7 @@ A container's environment is not set until there's an explicit entry in the serv ## Use the `environment` attribute You can set environment variables directly in your container's environment with the -[`environment` attribute](/reference/compose-file/services.md#environment) in your `compose.yml`. +[`environment` attribute](/reference/compose-file/services.md#environment) in your `compose.yaml`. It supports both list and mapping syntax: @@ -74,7 +74,7 @@ It can also help you keep your environment variables separate from your main con The [`env_file` attribute](/reference/compose-file/services.md#env_file) also lets you use multiple `.env` files in your Compose application. -The paths to your `.env` file, specified in the `env_file` attribute, are relative to the location of your `compose.yml` file. +The paths to your `.env` file, specified in the `env_file` attribute, are relative to the location of your `compose.yaml` file. > [!IMPORTANT] > diff --git a/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md b/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md index a13e51a9305f..04b185534697 100644 --- a/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md +++ b/content/manuals/compose/how-tos/environment-variables/variable-interpolation.md @@ -21,7 +21,7 @@ Below is a simple example: ```console $ cat .env TAG=v1.5 -$ cat compose.yml +$ cat compose.yaml services: web: image: "webapp:${TAG}" @@ -98,7 +98,7 @@ services: #### Additional information -- If you define a variable in your `.env` file, you can reference it directly in your `compose.yml` with the [`environment` attribute](/reference/compose-file/services.md#environment). 
For example, if your `.env` file contains the environment variable `DEBUG=1` and your `compose.yml` file looks like this:
+- If you define a variable in your `.env` file, you can reference it directly in your `compose.yaml` with the [`environment` attribute](/reference/compose-file/services.md#environment). For example, if your `.env` file contains the environment variable `DEBUG=1` and your `compose.yaml` file looks like this:
  ```yaml
  services:
    webapp:
@@ -149,6 +149,21 @@ The following syntax rules apply to environment files:
- `VAR="some\tvalue"` -> `some value`
- `VAR='some\tvalue'` -> `some\tvalue`
- `VAR=some\tvalue` -> `some\tvalue`
+- Single-quoted values can span multiple lines. Example:
+
+  ```yaml
+  KEY='SOME
+  VALUE'
+  ```
+
+  If you then run `docker compose config`, you'll see:
+
+  ```yaml
+  environment:
+    KEY: |-
+      SOME
+      VALUE
+  ```

### Substitute with `--env-file`

@@ -163,14 +178,14 @@ $ docker compose --env-file ./config/.env.dev up

#### Additional information

-- This method is useful if you want to temporarily override an `.env` file that is already referenced in your `compose.yml` file. For example you may have different `.env` files for production ( `.env.prod`) and testing (`.env.test`).
+- This method is useful if you want to temporarily override an `.env` file that is already referenced in your `compose.yaml` file. For example, you might have different `.env` files for production (`.env.prod`) and testing (`.env.test`).
  In the following example, there are two environment files, `.env` and `.env.dev`. Both have different values set for `TAG`.

  ```console
  $ cat .env
  TAG=v1.5
  $ cat ./config/.env.dev
  TAG=v1.6
- $ cat compose.yml
+ $ cat compose.yaml
  services:
    web:
      image: "webapp:${TAG}"
diff --git a/content/manuals/compose/how-tos/file-watch.md b/content/manuals/compose/how-tos/file-watch.md
index 365c0d60aded..a9463df9683a 100644
--- a/content/manuals/compose/how-tos/file-watch.md
+++ b/content/manuals/compose/how-tos/file-watch.md
@@ -7,12 +7,12 @@ aliases:
- /compose/file-watch/
---

-{{< introduced compose 2.22.0 "/manuals/compose/releases/release-notes.md#2220" >}}
+{{< summary-bar feature_name="Compose file watch" >}}

-{{< include "compose/watch.md" >}}
+{{% include "compose/watch.md" %}}

`watch` adheres to the following file path rules:

-* All paths are relative to the project directory
+* All paths are relative to the project directory, apart from ignore file patterns
* Directories are watched recursively
* Glob patterns aren't supported
* Rules from `.dockerignore` apply
@@ -31,8 +31,8 @@ Compose supports sharing a host directory inside service containers. Watch mode

More importantly, `watch` allows for greater granularity than is practical with a bind mount. Watch rules let you ignore specific files or entire directories within the watched tree.

For example, in a JavaScript project, ignoring the `node_modules/` directory has two benefits:

-* Performance. File trees with many small files can cause high I/O load in some configurations
-* Multi-platform. Compiled artifacts cannot be shared if the host OS or architecture is different to the container
+* Performance. File trees with many small files can cause a high I/O load in some configurations
+* Multi-platform. Compiled artifacts cannot be shared if the host OS or architecture is different from the container

For example, in a Node.js project, it's not recommended to sync the `node_modules/` directory.
Even though JavaScript is interpreted, `npm` packages can contain native code that is not portable across platforms.

@@ -40,7 +40,7 @@ For example, in a Node.js project, it's not recommended to sync the `node_module

The `watch` attribute defines a list of rules that control automatic service updates based on local file changes.

-Each rule requires, a `path` pattern and `action` to take when a modification is detected. There are two possible actions for `watch` and depending on
+Each rule requires a `path` pattern and `action` to take when a modification is detected. There are two possible actions for `watch` and depending on
the `action`, additional fields might be accepted or required.

Watch mode can be used with many different languages and frameworks.
@@ -88,15 +88,15 @@ If `action` is set to `rebuild`, Compose automatically builds a new image with B

The behavior is the same as running `docker compose up --build `.

-Rebuild is ideal for compiled languages or as fallbacks for modifications to particular files that require a full
+Rebuild is ideal for compiled languages or as a fallback for modifications to particular files that require a full
-image rebuild (e.g. `package.json`).
+image rebuild (for example, `package.json`).

#### Sync + Restart

-If `action` is set to `sync+restart`, Compose synchronizes your changes with the service containers and restarts it.
+If `action` is set to `sync+restart`, Compose synchronizes your changes with the service containers and restarts them.

-`sync+restart` is ideal when config file changes, and you don't need to rebuild the image but just restart the main process of the service containers.
-It will work well when you update a database configuration or your `nginx.conf` file for example
+`sync+restart` is ideal when the config file changes, and you don't need to rebuild the image but just restart the main process of the service containers.
+It works well when you update a database configuration or your `nginx.conf` file, for example.

>[!TIP]
>
@@ -114,6 +114,10 @@ For `path: ./app/html` and a change to `./app/html/index.html`:

* `target: /app/static` -> `/app/static/index.html`
* `target: /assets` -> `/assets/index.html`

+### `ignore`
+
+The `ignore` patterns are relative to the `path` defined in the current `watch` action, not to the project directory. In the following Example 1, the ignore path is relative to the `./web` directory specified in the `path` attribute.
+
## Example 1

This minimal example targets a Node.js application with the following structure:
@@ -121,7 +125,8 @@ This minimal example targets a Node.js application with the following structure:
myproject/
├── web/
│   ├── App.jsx
-│   └── index.js
+│   ├── index.js
+│   └── node_modules/
├── Dockerfile
├── compose.yaml
└── package.json
@@ -152,6 +157,8 @@ For example, `./web/App.jsx` is copied to `/src/web/App.jsx`.

Once copied, the bundler updates the running application without a restart.

+And in this case, the `ignore` rule applies to `myproject/web/node_modules/`, not `myproject/node_modules/`.
+
Unlike source code files, adding a new dependency can’t be done on-the-fly, so whenever `package.json` is changed, Compose rebuilds the image and recreates the `web` service container.
@@ -187,7 +194,7 @@ This setup demonstrates how to use the `sync+restart` action in Docker Compose t

## Use `watch`

-{{< include "compose/configure-watch.md" >}}
+{{% include "compose/configure-watch.md" %}}

> [!NOTE]
>
@@ -200,10 +207,6 @@ This setup demonstrates how to use the `sync+restart` action in Docker Compose t
> or [local setup for Docker docs](https://github.com/docker/docs/blob/main/CONTRIBUTING.md)
> for a demonstration of Compose `watch`.

-## Feedback
-
-We are actively looking for feedback on this feature. Give feedback or report any bugs you may find in the [Compose Specification repository](https://github.com/compose-spec/compose-spec/pull/253).
-
## Reference

- [Compose Develop Specification](/reference/compose-file/develop.md)
diff --git a/content/manuals/compose/how-tos/gpu-support.md b/content/manuals/compose/how-tos/gpu-support.md
index 6e0547fb6968..8bbd955cb591 100644
--- a/content/manuals/compose/how-tos/gpu-support.md
+++ b/content/manuals/compose/how-tos/gpu-support.md
@@ -1,7 +1,7 @@
---
-description: Understand GPU support in Docker Compose
+description: Learn how to configure Docker Compose to use NVIDIA GPUs with CUDA-based containers
keywords: documentation, docs, docker, compose, GPU access, NVIDIA, samples
-title: Enable GPU access with Docker Compose
+title: Run Docker Compose services with GPU access
linkTitle: Enable GPU support
weight: 90
aliases:
@@ -15,20 +15,22 @@ You can use either `docker-compose` or `docker compose` commands. For more infor

## Enabling GPU access to service containers

-GPUs are referenced in a `compose.yml` file using the [device](/reference/compose-file/deploy.md#devices) attribute from the Compose Deploy specification, within your services that need them.
+GPUs are referenced in a `compose.yaml` file using the [device](/reference/compose-file/deploy.md#devices) attribute from the Compose Deploy specification, within your services that need them.

This provides more granular control over a GPU reservation as custom values can be set for the following device properties:

-- `capabilities`. This value specifies as a list of strings (eg. `capabilities: [gpu]`). You must set this field in the Compose file. Otherwise, it returns an error on service deployment.
+- `capabilities`. This value is specified as a list of strings. For example, `capabilities: [gpu]`. You must set this field in the Compose file. Otherwise, it returns an error on service deployment.
-- `count`. This value, specified as an integer or the value `all`, represents the number of GPU devices that should be reserved (providing the host holds that number of GPUs). If `count` is set to `all` or not specified, all GPUs available on the host are used by default.
+- `count`. Specified as an integer or the value `all`, this value represents the number of GPU devices that should be reserved (provided the host holds that number of GPUs). If `count` is set to `all` or not specified, all GPUs available on the host are used by default.
- `device_ids`. This value, specified as a list of strings, represents GPU device IDs from the host. You can find the device ID in the output of `nvidia-smi` on the host. If no `device_ids` are set, all GPUs available on the host are used by default.
-- `driver`. This value is specified as a string, for example `driver: 'nvidia'`
+- `driver`. Specified as a string, for example `driver: 'nvidia'`.
- `options`. Key-value pairs representing driver specific options.

> [!IMPORTANT]
>
> You must set the `capabilities` field.
Otherwise, it returns an error on service deployment. + +> [!NOTE] > > `count` and `device_ids` are mutually exclusive. You must only define one field at a time. @@ -39,7 +41,7 @@ For more information on these properties, see the [Compose Deploy Specification] ```yaml services: test: - image: nvidia/cuda:12.3.1-base-ubuntu20.04 + image: nvidia/cuda:12.9.0-base-ubuntu22.04 command: nvidia-smi deploy: resources: diff --git a/content/manuals/compose/how-tos/lifecycle.md b/content/manuals/compose/how-tos/lifecycle.md index d044f1bed13d..5857539c161d 100644 --- a/content/manuals/compose/how-tos/lifecycle.md +++ b/content/manuals/compose/how-tos/lifecycle.md @@ -2,16 +2,16 @@ title: Using lifecycle hooks with Compose linkTitle: Use lifecycle hooks weight: 20 -desription: How to use lifecycle hooks with Docker Compose -keywords: cli, compose, lifecycle, hooks reference +description: Learn how to use Docker Compose lifecycle hooks like post_start and pre_stop to customize container behavior. +keywords: docker compose lifecycle hooks, post_start, pre_stop, docker compose entrypoint, docker container stop hooks, compose hook commands --- -{{< introduced compose 2.30.0 "../releases/release-notes.md#2300" >}} +{{< summary-bar feature_name="Compose lifecycle hooks" >}} ## Services lifecycle hooks When Docker Compose runs a container, it uses two elements, -[ENTRYPOINT and COMMAND](https://github.com/manuals//engine/containers/run.md#default-command-and-options), +[ENTRYPOINT and COMMAND](/manuals/engine/containers/run.md#default-command-and-options), to manage what happens when the container starts and stops. However, it can sometimes be easier to handle these tasks separately with lifecycle hooks - diff --git a/content/manuals/compose/how-tos/multiple-compose-files/extends.md b/content/manuals/compose/how-tos/multiple-compose-files/extends.md index d61fa26d5b46..5380c555b2e9 100644 --- a/content/manuals/compose/how-tos/multiple-compose-files/extends.md +++ b/content/manuals/compose/how-tos/multiple-compose-files/extends.md @@ -1,7 +1,6 @@ --- -description: How to use Docker Compose's extends keyword to share configuration between - files and projects -keywords: fig, composition, compose, docker, orchestration, documentation, docs +description: Learn how to reuse service configurations across files and projects using Docker Compose’s extends attribute. +keywords: fig, composition, compose, docker, orchestration, documentation, docs, compose file modularization title: Extend your Compose file linkTitle: Extend weight: 20 @@ -29,7 +28,7 @@ configuration. Tracking which fragment of a service is relative to which path is difficult and confusing, so to keep paths easier to understand, all paths must be defined relative to the base file. -## How it works +## How the `extends` attribute works ### Extending services from another file @@ -59,10 +58,10 @@ services: - "/data" ``` You get exactly the same result as if you wrote -`docker-compose.yml` with the same `build`, `ports`, and `volumes` configuration +`compose.yaml` with the same `build`, `ports`, and `volumes` configuration values defined directly under `web`. -To include the service `webapp` in the final project when extending services from another file, you need to explicitly include both services in your current Compose file. For example (note this is a non-normative example): +To include the service `webapp` in the final project when extending services from another file, you need to explicitly include both services in your current Compose file. 
For example (this is for illustrative purposes only):

```yaml
services:
@@ -133,7 +132,7 @@ services:
    cpu_shares: 5
```

-The `docker-compose.yaml` defines the concrete services which use the common
+The `compose.yaml` defines the concrete services which use the common
configuration:

```yaml
diff --git a/content/manuals/compose/how-tos/multiple-compose-files/include.md b/content/manuals/compose/how-tos/multiple-compose-files/include.md
index 70eb99e75052..db6139af59fa 100644
--- a/content/manuals/compose/how-tos/multiple-compose-files/include.md
+++ b/content/manuals/compose/how-tos/multiple-compose-files/include.md
@@ -6,9 +6,9 @@ aliases:
- /compose/multiple-compose-files/include/
---

-{{< introduced compose 2.20.3 "/manuals/compose/releases/release-notes.md#2203" >}}
+{{< summary-bar feature_name="Compose include" >}}

-{{< include "compose/include.md" >}}
+{{% include "compose/include.md" %}}

The [`include` top-level element](/reference/compose-file/include.md) helps to reflect the engineering team responsible for the code directly in the config file's organization. It also solves the relative path problem that [`extends`](extends.md) and [merge](merge.md) present.

Once the included Compose application loads, all resources are copied into the c

> [!NOTE]
>
-> `include` applies recursively so an included Compose file which declares its own `include` section, results in those other files being included as well.
+> `include` applies recursively, so an included Compose file that declares its own `include` section causes those files to also be included.

## Example

@@ -36,11 +36,24 @@ services:

This means the team managing `serviceB` can refactor its own database component to introduce additional services without impacting any dependent teams. It also means that the dependent teams don't need to include additional flags on each Compose command they run.

+```yaml
+include:
+  - oci://docker.io/username/my-compose-app:latest # use a Compose file stored as an OCI artifact
+services:
+  serviceA:
+    build: .
+    depends_on:
+      - serviceB
+```
+
+`include` allows you to reference Compose files from remote sources, such as OCI artifacts or Git repositories.
+Here `serviceB` is defined in a Compose file stored on Docker Hub.
+
-## Include and overrides
+## Using overrides with included Compose files

Compose reports an error if any resource from `include` conflicts with resources from the included Compose file. This rule prevents
-unexpected conflicts with resources defined by the included compose file author. However, there may be some circumstances where you might want to tweak the
+unexpected conflicts with resources defined by the included Compose file author. However, in some circumstances you might want to customize the
included model. This can be achieved by adding an override file to the include directive:
+
```yaml
include:
  - path :
@@ -49,7 +62,7 @@ include:
```

The main limitation with this approach is that you need to maintain a dedicated override file per include. For complex projects with multiple
-includes this would result into many Compose files.
+includes, this results in many Compose files.

The other option is to use a `compose.override.yaml` file.
-While conflicts will be rejected from the file using `include` when same resource is declared, a global Compose override file can override the resulting merged model, as demonstrated in following example:
+While conflicts are rejected from the file using `include` when the same resource is declared, a global Compose override file can override the resulting merged model, as demonstrated in the following example:
diff --git a/content/manuals/compose/how-tos/networking.md b/content/manuals/compose/how-tos/networking.md
index 23ddc7455148..7045237d0635 100644
--- a/content/manuals/compose/how-tos/networking.md
+++ b/content/manuals/compose/how-tos/networking.md
@@ -8,7 +8,7 @@ aliases:
- /compose/networking/
---

-{{< include "compose-eol.md" >}}
+{{% include "compose-eol.md" %}}

By default Compose sets up a single
[network](/reference/cli/docker/network/create.md) for your app. Each
@@ -22,7 +22,7 @@ other containers on that network, and discoverable by the service's name.
> project name with either the [`--project-name` flag](/reference/cli/docker/compose.md)
> or the [`COMPOSE_PROJECT_NAME` environment variable](environment-variables/envvars.md#compose_project_name).

-For example, suppose your app is in a directory called `myapp`, and your `compose.yml` looks like this:
+For example, suppose your app is in a directory called `myapp`, and your `compose.yaml` looks like this:

```yaml
services:
@@ -164,7 +164,9 @@ networks:
    driver: custom-driver-1
```

-## Use a pre-existing network
+## Use an existing network
+
+If you've manually created a bridge network outside of Compose using the `docker network create` command, you can connect your Compose services to it by marking the network as `external`.

If you want your containers to join a pre-existing network, use the [`external` option](/reference/compose-file/networks.md#external)

```yaml
diff --git a/content/manuals/compose/how-tos/oci-artifact.md b/content/manuals/compose/how-tos/oci-artifact.md
new file mode 100644
index 000000000000..6125ea989ce7
--- /dev/null
+++ b/content/manuals/compose/how-tos/oci-artifact.md
@@ -0,0 +1,155 @@
+---
+title: Package and deploy Docker Compose applications as OCI artifacts
+linkTitle: OCI artifact applications
+weight: 110
+description: Learn how to package, publish, and securely run Docker Compose applications from OCI-compliant registries.
+keywords: cli, compose, oci, docker hub, artifacts, publish, package, distribute, docker compose oci support
+params:
+  sidebar:
+    badge:
+      color: green
+      text: New
+---
+
+{{< summary-bar feature_name="Compose OCI artifact" >}}
+
+Docker Compose supports working with [OCI artifacts](/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md), allowing you to package and distribute your Compose applications through container registries. This means you can store your Compose files alongside your container images, making it easier to version, share, and deploy your multi-container applications.
+
+## Publish your Compose application as an OCI artifact
+
+To distribute your Compose application as an OCI artifact, you can use the `docker compose publish` command to publish it to an OCI-compliant registry.
+This allows others to then deploy your application directly from the registry.
+
+The publish function supports most of the composition capabilities of Compose, like overrides, extends, or include, [with some limitations](#limitations).
+
+### General steps
+
+1. Navigate to your Compose application's directory.
+   Ensure you're in the directory containing your `compose.yaml` file or that you are specifying your Compose file with the `-f` flag.
+
+2. In your terminal, sign in to your Docker account so you're authenticated with Docker Hub.
+
+   ```console
+   $ docker login
+   ```
+
+3. Use the `docker compose publish` command to push your application as an OCI artifact:
+
+   ```console
+   $ docker compose publish username/my-compose-app:latest
+   ```
+   If you have multiple Compose files, run:
+
+   ```console
+   $ docker compose -f compose-base.yml -f compose-production.yml publish username/my-compose-app:latest
+   ```
+
+### Advanced publishing options
+
+When publishing, you can pass additional options:
+- `--oci-version`: Specify the OCI version (default is automatically determined).
+- `--resolve-image-digests`: Pin image tags to digests.
+- `--with-env`: Include environment variables in the published OCI artifact.
+
+Compose checks to make sure there isn't any sensitive data in your configuration and displays your environment variables to confirm you want to publish them.
+
+```text
+...
+you are about to publish sensitive data within your OCI artifact.
+please double check that you are not leaking sensitive data
+AWS Client ID
+"services.serviceA.environment.AWS_ACCESS_KEY_ID": xxxxxxxxxx
+AWS Secret Key
+"services.serviceA.environment.AWS_SECRET_ACCESS_KEY": aws"xxxx/xxxx+xxxx+"
+Github authentication
+"GITHUB_TOKEN": ghp_xxxxxxxxxx
+JSON Web Token
+"": xxxxxxx.xxxxxxxx.xxxxxxxx
+Private Key
+"": -----BEGIN DSA PRIVATE KEY-----
+xxxxx
+-----END DSA PRIVATE KEY-----
+Are you ok to publish these sensitive data? [y/N]:y
+
+you are about to publish environment variables within your OCI artifact.
+please double check that you are not leaking sensitive data
+Service/Config serviceA
+FOO=bar
+Service/Config serviceB
+FOO=bar
+QUIX=
+BAR=baz
+Are you ok to publish these environment variables? [y/N]:
+```
+
+If you decline, the publish process stops without sending anything to the registry.
+
+## Limitations
+
+There are limitations to publishing Compose applications as OCI artifacts. You can't publish a Compose configuration:
+- With service(s) containing bind mounts
+- With service(s) containing only a `build` section
+- That includes local files with the `include` attribute. To publish successfully, ensure that any included local files are also published. You can then use `include` to reference these files, as remote `include` is supported.
+
+## Start an OCI artifact application
+
+To start a Docker Compose application that uses an OCI artifact, use the `-f` (or `--file`) flag followed by the OCI artifact reference. This allows you to specify a Compose file stored as an OCI artifact in a registry.
+
+The `oci://` prefix indicates that the Compose file should be pulled from an OCI-compliant registry rather than loaded from the local filesystem:
+
+```console
+$ docker compose -f oci://docker.io/username/my-compose-app:latest up
+```
+
+### Troubleshooting
+
+When you run an application from an OCI artifact, Compose might display warning messages that require you to confirm the following, to limit the risk of running a malicious application:
+
+- A list of the interpolation variables used along with their values
+- A list of all environment variables used by the application
+- Any other remote resources the application uses, for example via [`include`](/reference/compose-file/include/)
+
+ +```text +$ REGISTRY=myregistry.com docker compose -f oci://docker.io/username/my-compose-app:latest up + +Found the following variables in configuration: +VARIABLE VALUE SOURCE REQUIRED DEFAULT +REGISTRY myregistry.com command-line yes +TAG v1.0 environment no latest +DOCKERFILE Dockerfile default no Dockerfile +API_KEY none no + +Do you want to proceed with these variables? [Y/n]:y + +Warning: This Compose project includes files from remote sources: +- oci://registry.example.com/stack:latest +Remote includes could potentially be malicious. Make sure you trust the source. +Do you want to continue? [y/N]: +``` + +If you agree to start the application, Compose displays the directory where all the resources from the OCI artifact have been downloaded: + +```text +... +Do you want to continue? [y/N]: y + +Your compose stack "oci://registry.example.com/stack:latest" is stored in "~/Library/Caches/docker-compose/964e715660d6f6c3b384e05e7338613795f7dcd3613890cfa57e3540353b9d6d" +``` + +The `docker compose publish` command supports non-interactive execution, letting you skip the confirmation prompt by including the `-y` (or `--yes`) flag: + +```console +$ docker compose publish -y username/my-compose-app:latest +``` + +## Next steps + +- [Learn about OCI artifacts in Docker Hub](/manuals/docker-hub/repos/manage/hub-images/oci-artifacts.md) +- [Compose publish command](/reference/cli/docker/compose/publish.md) +- [Understand `include`](/reference/compose-file/include.md) diff --git a/content/manuals/compose/how-tos/production.md b/content/manuals/compose/how-tos/production.md index 7ab4d49aec0f..d2c8e4189942 100644 --- a/content/manuals/compose/how-tos/production.md +++ b/content/manuals/compose/how-tos/production.md @@ -1,6 +1,6 @@ --- -description: Guide to using Docker Compose in production -keywords: compose, orchestration, containers, production +description: Learn how to configure, deploy, and update Docker Compose applications for production environments. +keywords: compose, orchestration, containers, production, production docker compose configuration title: Use Compose in production weight: 100 aliases: @@ -29,15 +29,15 @@ production. These changes might include: - Adding extra services such as a log aggregator For this reason, consider defining an additional Compose file, for example -`production.yml`, which specifies production-appropriate -configuration. This configuration file only needs to include the changes you want to make from the original Compose file. The additional Compose file -is then applied over the original `compose.yml` to create a new configuration. +`compose.production.yaml`, with production-specific +configuration details. This configuration file only needs to include the changes you want to make from the original Compose file. The additional Compose file +is then applied over the original `compose.yaml` to create a new configuration. Once you have a second configuration file, you can use it with the `-f` option: ```console -$ docker compose -f compose.yml -f production.yml up -d +$ docker compose -f compose.yaml -f compose.production.yaml up -d ``` See [Using multiple compose files](multiple-compose-files/_index.md) for a more complete example, and other options. @@ -55,7 +55,7 @@ $ docker compose up --no-deps -d web This first command rebuilds the image for `web` and then stops, destroys, and recreates just the `web` service. The `--no-deps` flag prevents Compose from also -recreating any services which `web` depends on. 
+recreating any services that `web` depends on.

### Running Compose on a single server

@@ -65,3 +65,8 @@ appropriately. For more information, see [pre-defined environment variables](env

Once you've set up your environment variables, all the normal `docker compose`
commands work with no further configuration.
+
+## Next steps
+
+- [Using multiple Compose files](multiple-compose-files/_index.md)
+
diff --git a/content/manuals/compose/how-tos/profiles.md b/content/manuals/compose/how-tos/profiles.md
index f77854fc9367..13d472e5dc39 100644
--- a/content/manuals/compose/how-tos/profiles.md
+++ b/content/manuals/compose/how-tos/profiles.md
@@ -8,7 +8,7 @@ aliases:
- /compose/profiles/
---

-{{< include "compose/profiles.md" >}}
+{{% include "compose/profiles.md" %}}

## Assigning profiles to services

@@ -61,7 +61,7 @@ $ COMPOSE_PROFILES=debug docker compose up
```

Both commands start the services with the `debug` profile enabled.
-In the previous `compose.yml` file, this starts the services
+In the previous `compose.yaml` file, this starts the services
`db`, `backend` and `phpmyadmin`.

### Start multiple profiles

@@ -85,6 +85,12 @@ If you want to enable all profiles at the same time, you can run `docker compose

## Auto-starting profiles and dependency resolution

+When you explicitly target a service on the command line that has one or more profiles assigned, you do not need to enable the profile manually, as Compose runs that service regardless of whether its profile is activated. This is useful for running one-off services or debugging tools.
+
+Only the targeted service (and any of its declared dependencies via `depends_on`) is started. Other services that share the same profile are not started unless:
+- They are also explicitly targeted, or
+- The profile is explicitly enabled using `--profile` or `COMPOSE_PROFILES`.
+
-When a service with assigned `profiles` is explicitly targeted on the command line its profiles are started automatically so you don't need to start them manually. This can be used for one-off services and debugging tools.
@@ -108,74 +114,21 @@ services:
```

```sh
-# Only start backend and db
+# Only start backend and db (no profiles involved)
$ docker compose up -d

-# This runs db-migrations (and,if necessary, start db)
-# by implicitly enabling the profiles `tools`
+# Run the db-migrations service without manually enabling the 'tools' profile
$ docker compose run db-migrations
```

-But keep in mind that `docker compose` only automatically starts the
-profiles of the services on the command line and not of any dependencies.
-
-This means that any other services the targeted service `depends_on` should either:
-- Share a common profile
-- Always be started, by omitting `profiles` or having a matching profile started explicitly
-
-```yaml
-services:
-  web:
-    image: web
-
-  mock-backend:
-    image: backend
-    profiles: ["dev"]
-    depends_on:
-      - db
-
-  db:
-    image: mysql
-    profiles: ["dev"]
-
-  phpmyadmin:
-    image: phpmyadmin
-    profiles: ["debug"]
-    depends_on:
-      - db
-```
-
-```sh
-# Only start "web"
-$ docker compose up -d
-
-# Start mock-backend (and, if necessary, db)
-# by implicitly enabling profiles `dev`
-$ docker compose up -d mock-backend
-
-# This fails because profiles "dev" is not enabled
-$ docker compose up phpmyadmin
-```
-
-Although targeting `phpmyadmin` automatically starts the profiles `debug`, it doesn't automatically start the profiles required by `db` which is `dev`.
-
-To fix this you either have to add the `debug` profile to the `db` service:
-
-```yaml
-db:
-  image: mysql
-  profiles: ["debug", "dev"]
-```
-
-or start the `dev` profile explicitly:
+In this example, `db-migrations` runs even though it is assigned to the `tools` profile, because it was explicitly targeted. The `db` service is also started automatically because it is listed in `depends_on`.

-```console
-# Profiles "debug" is started automatically by targeting phpmyadmin
-$ docker compose --profile dev up phpmyadmin
-$ COMPOSE_PROFILES=dev docker compose up phpmyadmin
-```
+If the targeted service has dependencies that are also gated behind a profile, you must ensure those dependencies are:
+ - In the same profile
+ - Started separately
+ - Not assigned to any profile, so they are always enabled

-## Stop specific profiles
+## Stop application and services with specific profiles

As with starting specific profiles, you can use the `--profile` [command-line option](/reference/cli/docker/compose.md#use--p-to-specify-a-project-name) or use the [`COMPOSE_PROFILES` environment variable](environment-variables/envvars.md#compose_profiles):

@@ -187,7 +140,7 @@ $ docker compose --profile debug down
$ COMPOSE_PROFILES=debug docker compose down
```

-Both commands stop and remove services with the `debug` profile. In the following `compose.yml` file, this stops the services `db` and `phpmyadmin`.
+Both commands stop and remove services with the `debug` profile and services without a profile. In the following `compose.yaml` file, this stops the services `db`, `backend`, and `phpmyadmin`.

```yaml
services:
@@ -207,6 +160,16 @@ services:
  image: mysql
```

+If you only want to stop the `phpmyadmin` service, you can run:
+
+```console
+$ docker compose down phpmyadmin
+```
+or
+```console
+$ docker compose stop phpmyadmin
+```
+
> [!NOTE]
>
> Running `docker compose down` only stops `backend` and `db`.
diff --git a/content/manuals/compose/how-tos/project-name.md b/content/manuals/compose/how-tos/project-name.md
index 18372aa7cc5e..37aabdcaa5bd 100644
--- a/content/manuals/compose/how-tos/project-name.md
+++ b/content/manuals/compose/how-tos/project-name.md
@@ -1,20 +1,20 @@
---
title: Specify a project name
weight: 10
-description: Understand the different ways you can set a project name in Compose and what the precedence is.
+description: Learn how to set a custom project name in Compose and understand the precedence of each method.
keywords: name, compose, project, -p flag, name top-level element
aliases:
- /compose/project-name/
---

-In Compose, the default project name is derived from the base name of the project directory. However, you have the flexibility to set a custom project name.
+By default, Compose assigns the project name based on the name of the directory that contains the Compose file. You can override this with several methods.

This page offers examples of scenarios where custom project names can be helpful, outlines the various methods to set a project name, and provides the order of precedence for each approach.

> [!NOTE]
>
> The default project directory is the base directory of the Compose file. A custom value can also be set
-> for it using the [`--project-directory` command line option](/reference/cli/docker/compose.md#use--p-to-specify-a-project-name).
+> for it using the [`--project-directory` command line option](/reference/cli/docker/compose.md#options).
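+
+For example, assuming a Compose file in the current directory, either of the following commands starts the project under the custom name `myapp` (the name is illustrative) instead of the directory name:
+
+```console
+$ docker compose -p myapp up -d
+$ COMPOSE_PROJECT_NAME=myapp docker compose up -d
+```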
## Example use cases
diff --git a/content/manuals/compose/how-tos/provider-services.md b/content/manuals/compose/how-tos/provider-services.md
new file mode 100644
index 000000000000..91c5be1efa2e
--- /dev/null
+++ b/content/manuals/compose/how-tos/provider-services.md
@@ -0,0 +1,128 @@
+---
+title: Use provider services
+description: Learn how to use provider services in Docker Compose to integrate external capabilities into your applications
+keywords: compose, docker compose, provider, services, platform capabilities, integration, model runner, ai
+weight: 112
+params:
+  sidebar:
+    badge:
+      color: green
+      text: New
+---
+
+{{< summary-bar feature_name="Compose provider services" >}}
+
+Docker Compose supports provider services, which allow integration with services whose lifecycles are managed by third-party components rather than by Compose itself.
+This feature enables you to define and use platform-specific services without the need for manual setup or direct lifecycle management.
+
+## What are provider services?
+
+Provider services are a special type of service in Compose that represents platform capabilities rather than containers.
+They allow you to declare dependencies on specific platform features that your application needs.
+
+When you define a provider service in your Compose file, Compose works with the platform to provision and configure
+the requested capability, making it available to your application services.
+
+## Using provider services
+
+To use a provider service in your Compose file, you need to:
+
+1. Define a service with the `provider` attribute
+2. Specify the `type` of provider you want to use
+3. Configure any provider-specific options
+4. Declare dependencies from your application services to the provider service
+
+Here's a basic example:
+
+```yaml
+services:
+  database:
+    provider:
+      type: awesomecloud
+      options:
+        type: mysql
+        foo: bar
+  app:
+    image: myapp
+    depends_on:
+      - database
+```
+
+Notice the dedicated `provider` attribute in the `database` service.
+This attribute specifies that the service is managed by a provider and lets you define options specific to that provider type.
+
+The `depends_on` attribute in the `app` service specifies that it depends on the `database` service.
+This means that the `database` service is started before the `app` service, allowing the provider information
+to be injected into the `app` service.
+
+## How it works
+
+During the `docker compose up` command execution, Compose identifies services relying on providers and works with them to provision
+the requested capabilities. The provider then populates the Compose model with information about how to access the provisioned resource.
+
+This information is passed to services that declare a dependency on the provider service, typically through environment
+variables. The naming convention for these variables is:
+
+```env
+<SERVICE_NAME>_<VARIABLE_NAME>
+```
+
+For example, if your provider service is named `database`, your application service might receive environment variables like:
+
+- `DATABASE_URL` with the URL to access the provisioned resource
+- `DATABASE_TOKEN` with an authentication token
+- Other provider-specific variables
+
+Your application can then use these environment variables to interact with the provisioned resource.
+
+## Provider types
+
+The `type` field in a provider service references the name of either:
+
+1. A Docker CLI plugin (for example, `docker-model`)
+2. 
A binary available in the user's PATH

+When Compose encounters a provider service, it looks for a plugin or binary with the specified name to handle the provisioning of the requested capability.
+
+For example, if you specify `type: model`, Compose looks for a Docker CLI plugin named `docker-model` or a binary named `model` in the PATH.
+
+```yaml
+services:
+  ai-runner:
+    provider:
+      type: model # Looks for docker-model plugin or model binary
+      options:
+        model: ai/example-model
+```
+
+The plugin or binary is responsible for:
+
+1. Interpreting the options provided in the provider service
+2. Provisioning the requested capability
+3. Returning information about how to access the provisioned resource
+
+This information is then passed to dependent services as environment variables.
+
+> [!TIP]
+>
+> If you're working with AI models in Compose, use the [`models` top-level element](/manuals/ai/compose/models-and-compose.md) instead.
+
+## Benefits of using provider services
+
+Using provider services in your Compose applications offers several benefits:
+
+1. Simplified configuration: You don't need to manually configure and manage platform capabilities
+2. Declarative approach: You can declare all your application's dependencies in one place
+3. Consistent workflow: You use the same Compose commands to manage your entire application, including platform capabilities
+
+## Creating your own provider
+
+If you want to create your own provider to extend Compose with custom capabilities, you can implement a Compose plugin that registers provider types.
+
+For detailed information on how to create and implement your own provider, refer to the [Compose Extensions documentation](https://github.com/docker/compose/blob/main/docs/extension.md).
+This guide explains the extension mechanism that allows you to add new provider types to Compose.
+
+## Reference
+
+- [Docker Model Runner documentation](/manuals/ai/model-runner.md)
+- [Compose Extensions documentation](https://github.com/docker/compose/blob/main/docs/extension.md)
\ No newline at end of file
diff --git a/content/manuals/compose/how-tos/startup-order.md b/content/manuals/compose/how-tos/startup-order.md
index e76deb436692..1d55fd5ee14d 100644
--- a/content/manuals/compose/how-tos/startup-order.md
+++ b/content/manuals/compose/how-tos/startup-order.md
@@ -1,6 +1,6 @@
---
-description: How to control service startup and shutdown order in Docker Compose
-keywords: documentation, docs, docker, compose, startup, shutdown, order
+description: Learn how to manage service startup and shutdown order in Docker Compose using depends_on and healthchecks.
+keywords: docker compose startup order, compose shutdown order, depends_on, service healthcheck, control service dependencies
title: Control startup and shutdown order in Compose
linkTitle: Control startup order
weight: 30
@@ -13,7 +13,7 @@ You can control the order of service startup and shutdown with the
containers in dependency order, where dependencies are determined by
`depends_on`, `links`, `volumes_from`, and `network_mode: "service:..."`.

-A good example of when you might use this is an application which needs to access a database. If both services are started with `docker compose up`, there is a chance this will fail since the application service might start before the database service and won't find a database able to handle its SQL statements.
+For example, if your application needs to access a database and both services are started with `docker compose up`, there is a chance this fails because the application service might start before the database service and won't find a database able to handle its SQL statements.

## Control startup

@@ -55,7 +55,7 @@ Compose waits for healthchecks to pass on dependencies marked with `service_heal

`restart: true` ensures that if `db` is updated or restarted due to an explicit Compose operation, for example `docker compose restart`, the `web` service is also restarted automatically, ensuring it re-establishes connections or dependencies correctly.

-The healthcheck for the `db` service uses the `pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}'` command to check if the PostgreSQL database is ready. The service is retried every 10 seconds, up to 5 times.
+The healthcheck for the `db` service uses the `pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}` command to check if the PostgreSQL database is ready. The check is retried every 10 seconds, up to five times.

Compose also removes services in dependency order. `web` is removed before `db` and `redis`.
diff --git a/content/manuals/compose/how-tos/use-secrets.md b/content/manuals/compose/how-tos/use-secrets.md
index fe677ff8a406..d2c6352c3be5 100644
--- a/content/manuals/compose/how-tos/use-secrets.md
+++ b/content/manuals/compose/how-tos/use-secrets.md
@@ -1,9 +1,9 @@
---
-title: How to use secrets in Docker Compose
+title: Manage secrets securely in Docker Compose
linkTitle: Secrets in Compose
weight: 60
-description: How to use secrets in Compose and their benefits
-keywords: secrets, compose, security, environment variables
+description: Learn how to securely manage runtime and build-time secrets in Docker Compose.
+keywords: secrets, compose, security, environment variables, docker secrets, secure Docker builds, sensitive data in containers
tags: [Secrets]
aliases:
- /compose/use-secrets/
@@ -11,7 +11,7 @@ aliases:

A secret is any piece of data, such as a password, certificate, or API key, that shouldn’t be transmitted over a network or stored unencrypted in a Dockerfile or in your application’s source code.

-{{< include "compose/secrets.md" >}}
+{{% include "compose/secrets.md" %}}

Environment variables are often available to all processes, and it can be difficult to track access. They can also be printed in logs when debugging errors without your knowledge. Using secrets mitigates these risks.

@@ -25,7 +25,7 @@ Unlike the other methods, this permits granular access control within a service

## Examples

-### Simple
+### Single-service secret injection

In the following example, the frontend service is given access to the `my_secret` secret. In the container, `/run/secrets/my_secret` is set to the contents of the file `./my_secret.txt`.

@@ -40,7 +40,7 @@ secrets:
    file: ./my_secret.txt
```

-### Advanced
+### Multi-service secret sharing and password management

```yaml
services:
diff --git a/content/manuals/compose/install/_index.md b/content/manuals/compose/install/_index.md
index 8e24ad484bad..510942e8f981 100644
--- a/content/manuals/compose/install/_index.md
+++ b/content/manuals/compose/install/_index.md
@@ -1,9 +1,7 @@
---
description: Learn how to install Docker Compose. Compose is available natively on
  Docker Desktop, as a Docker Engine plugin, and as a standalone tool.
-keywords: install docker compose, docker compose install, install docker compose ubuntu,
-  installing docker compose, docker compose download, docker compose not found, docker
-  compose windows, how to install docker compose
+keywords: install docker compose, docker compose plugin, install compose linux, install docker desktop, docker compose windows, standalone docker compose, docker compose not found
 title: Overview of installing Docker Compose
 linkTitle: Install
 weight: 20
@@ -14,46 +12,36 @@ aliases:
 - /compose/install/compose-desktop/
 ---

-This page contains summary information about the available options for installing Docker Compose.
+This page summarizes the different ways you can install Docker Compose, depending on your platform and needs.

## Installation scenarios

-### Scenario one: Install Docker Desktop
+### Docker Desktop (recommended)

-The easiest and recommended way to get Docker Compose is to install Docker Desktop. Docker Desktop
-includes Docker Compose along with Docker Engine and Docker CLI which are Compose prerequisites.
+The easiest and recommended way to get Docker Compose is to install Docker Desktop.

-Docker Desktop is available on:
+Docker Desktop includes Docker Compose along with Docker Engine and Docker CLI, which are Compose prerequisites.
+
+Docker Desktop is available for:

- [Linux](/manuals/desktop/setup/install/linux/_index.md)
- [Mac](/manuals/desktop/setup/install/mac-install.md)
- [Windows](/manuals/desktop/setup/install/windows-install.md)

-If you have already installed Docker Desktop, you can check which version of Compose you have by selecting **About Docker Desktop** from the Docker menu {{< inline-image src="../../desktop/images/whale-x.svg" alt="whale menu" >}}.
-
-> [!NOTE]
->
-> After Docker Compose V1 was removed in Docker Desktop version [4.23.0](/desktop/release-notes/#4230) as it had reached end-of-life,
-> the `docker-compose` command now points directly to the Docker Compose V2 binary, running in standalone mode.
-> If you rely on Docker Desktop auto-update, the symlink might be broken and command unavailable, as the update doesn't ask for administrator password.
+> [!TIP]
>
-> This only affects Mac users. To fix this, either recreate the symlink:
-> ```console
-> $ sudo rm /usr/local/bin/docker-compose
-> $ sudo ln -s /Applications/Docker.app/Contents/Resources/cli-plugins/docker-compose /usr/local/bin/docker-compose
-> ```
-> Or enable [Automatically check configuration](/manuals/desktop/settings-and-maintenance/settings.md) which will detect and fix it for you.
+> If you have already installed Docker Desktop, you can check which version of Compose you have by selecting **About Docker Desktop** from the Docker menu {{< inline-image src="../../desktop/images/whale-x.svg" alt="whale menu" >}}.

-### Scenario two: Install the Docker Compose plugin
+### Plugin (Linux only)

> [!IMPORTANT]
>
-> This install scenario is only available on Linux.
+> This method is only available on Linux.
If you already have Docker Engine and Docker CLI installed, you can install the Docker Compose plugin from the command line by either:

- [Using Docker's repository](linux.md#install-using-the-repository)
- [Downloading and installing manually](linux.md#install-the-plugin-manually)

-### Scenario three: Install the Docker Compose standalone
+### Standalone (legacy)

> [!WARNING]
>
diff --git a/content/manuals/compose/install/linux.md b/content/manuals/compose/install/linux.md
index c8d68c2526d5..6862b51faea6 100644
--- a/content/manuals/compose/install/linux.md
+++ b/content/manuals/compose/install/linux.md
@@ -1,16 +1,13 @@
 ---
-description: Download and install Docker Compose on Linux with this step-by-step handbook.
-  This plugin can be installed manually or by using a repository.
-keywords: install docker compose linux, docker compose linux, docker compose plugin,
-  docker-compose-plugin, linux install docker compose, install docker-compose linux,
-  linux install docker-compose, linux docker compose, docker compose v2 linux, install
-  docker compose on linux
+description: Step-by-step instructions for installing the Docker Compose plugin on Linux using a package repository or manual method.
+keywords: install docker compose linux, docker compose plugin, docker-compose-plugin linux, docker compose v2, docker compose manual install, linux docker compose
 toc_max: 3
 title: Install the Docker Compose plugin
 linkTitle: Plugin
 aliases:
 - /compose/compose-plugin/
 - /compose/compose-linux/
+- /compose/install/compose-plugin/
 weight: 10
 ---

@@ -22,7 +19,7 @@ To install the Docker Compose plugin on Linux, you can either:

 > [!NOTE]
 >
-> These instructions assume you already have Docker Engine and Docker CLI installed and now want to install the Docker Compose plugin. For the Docker Compose standalone, see [Install the Docker Compose Standalone](standalone.md).
+> These instructions assume you already have Docker Engine and Docker CLI installed and now want to install the Docker Compose plugin.

 ## Install using the repository

@@ -57,14 +54,6 @@ To install the Docker Compose plugin on Linux, you can either:
     $ docker compose version
     ```

-   Expected output:
-
-   ```text
-   Docker Compose version vN.N.N
-   ```
-
-   Where `vN.N.N` is placeholder text standing in for the latest version.
-
 ### Update Docker Compose

 To update the Docker Compose plugin, run the following commands:
@@ -84,9 +73,9 @@ To update the Docker Compose plugin, run the following commands:

 ## Install the plugin manually

-> [!NOTE]
+> [!WARNING]
 >
-> This option requires you to manage upgrades manually. It is recommended that you set up Docker's repository for easier maintenance.
+> Manual installations don’t auto-update. For ease of maintenance, use the Docker repository method.

 1. To download and install the Docker Compose CLI plugin, run:

@@ -120,9 +109,8 @@ To update the Docker Compose plugin, run the following commands:

    ```console
    $ docker compose version
    ```
-
-   Expected output:
-   ```text
-   Docker Compose version {{% param "compose_version" %}}
-   ```
+## What's next?
+ +- [Understand how Compose works](/manuals/compose/intro/compose-application-model.md) +- [Try the Quickstart guide](/manuals/compose/gettingstarted.md) diff --git a/content/manuals/compose/install/standalone.md b/content/manuals/compose/install/standalone.md index 9e7ff3a02d41..a6cc7106acf6 100644 --- a/content/manuals/compose/install/standalone.md +++ b/content/manuals/compose/install/standalone.md @@ -1,8 +1,8 @@ --- title: Install the Docker Compose standalone linkTitle: Standalone -description: How to install Docker Compose - Other Scenarios -keywords: compose, orchestration, install, installation, docker, documentation +description: Instructions for installing the legacy Docker Compose standalone tool on Linux and Windows Server +keywords: install docker-compose, standalone docker compose, docker-compose windows server, install docker compose linux, legacy compose install toc_max: 3 weight: 20 --- @@ -12,7 +12,8 @@ This page contains instructions on how to install Docker Compose standalone on L > [!WARNING] > > The Docker Compose standalone uses the `-compose` syntax instead of the current standard syntax `compose`. -> For example, you must type `docker-compose up` when using Docker Compose standalone, instead of `docker compose up`. +> For example, you must type `docker-compose up` when using Docker Compose standalone, instead of `docker compose up`. +> Use it only for backward compatibility. ## On Linux @@ -48,7 +49,7 @@ on Microsoft Windows Server](/manuals/engine/install/binaries.md#install-server- In order to proceed with the installation, select **Yes** when asked if you want this app to make changes to your device. 2. Optional. Ensure TLS1.2 is enabled. - GitHub requires TLS1.2 fore secure connections. If you’re using an older version of Windows Server, for example 2016, or suspect that TLS1.2 is not enabled, run the following command in PowerShell: + GitHub requires TLS1.2 for secure connections. If you’re using an older version of Windows Server, for example 2016, or suspect that TLS1.2 is not enabled, run the following command in PowerShell: ```powershell [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 @@ -74,3 +75,8 @@ on Microsoft Windows Server](/manuals/engine/install/binaries.md#install-server- $ docker-compose.exe version Docker Compose version {{% param "compose_version" %}} ``` + +## What's next? + +- [Understand how Compose works](/manuals/compose/intro/compose-application-model.md) +- [Try the Quickstart guide](/manuals/compose/gettingstarted.md) diff --git a/content/manuals/compose/install/uninstall.md b/content/manuals/compose/install/uninstall.md index 8cda6ca42bc9..714389deda04 100644 --- a/content/manuals/compose/install/uninstall.md +++ b/content/manuals/compose/install/uninstall.md @@ -2,29 +2,32 @@ description: How to uninstall Docker Compose keywords: compose, orchestration, uninstall, uninstallation, docker, documentation title: Uninstall Docker Compose +linkTitle: Uninstall --- -Uninstalling Docker Compose depends on the method you have used to install Docker Compose. On this page you can find specific instructions to uninstall Docker Compose. +How you uninstall Docker Compose depends on how it was installed. 
This guide covers uninstallation instructions for:
+- Docker Compose installed via Docker Desktop
+- Docker Compose installed as a CLI plugin

-## Uninstalling Docker Desktop
+## Uninstalling Docker Compose with Docker Desktop

If you want to uninstall Docker Compose and you have installed Docker Desktop, see [Uninstall Docker Desktop](/manuals/desktop/uninstall.md).

-> [!NOTE]
+> [!WARNING]
>
-> Unless you have other Docker instances installed on that specific environment, you would be removing Docker altogether by uninstalling Docker Desktop.
+> Unless you have other Docker instances installed on that specific environment, uninstalling Docker Desktop removes all Docker components, including Docker Engine, Docker CLI, and Docker Compose.

## Uninstalling the Docker Compose CLI plugin

-To remove the Docker Compose CLI plugin, run:
+If you installed Docker Compose via a package manager, run:

-Ubuntu, Debian:
+On Ubuntu or Debian:

```console
$ sudo apt-get remove docker-compose-plugin
```

-RPM-based distributions:
+On RPM-based distributions:

```console
$ sudo yum remove docker-compose-plugin
@@ -32,7 +35,7 @@ RPM-based distributions:

### Manually installed

-If you used `curl` to install Docker Compose CLI plugin, to uninstall it, run:
+If you installed Docker Compose manually (using `curl`), remove it by deleting the binary:

```console
$ rm $DOCKER_CONFIG/cli-plugins/docker-compose
@@ -40,7 +43,7 @@ If you used `curl` to install Docker Compose CLI plugin, to uninstall it, run:

### Remove for all users

-Or, if you have installed Docker Compose for all users, run:
+If installed for all users, remove it from the system directory:

```console
$ rm /usr/local/lib/docker/cli-plugins/docker-compose
diff --git a/content/manuals/compose/intro/compose-application-model.md b/content/manuals/compose/intro/compose-application-model.md
index 510c35cb8bc3..9e20edf3f9a6 100644
--- a/content/manuals/compose/intro/compose-application-model.md
+++ b/content/manuals/compose/intro/compose-application-model.md
@@ -1,8 +1,8 @@
 ---
 title: How Compose works
 weight: 10
-description: Understand how Compose works and the Compose application model with an illustrative example
-keywords: compose, docker compose, compose specification, compose model
+description: Learn how Docker Compose works, from the application model to Compose files and CLI, while following a detailed example.
+keywords: docker compose, compose.yaml, docker compose model, compose cli, multi-container application, compose example
 aliases:
 - /compose/compose-file/02-model/
 - /compose/compose-yaml-file/
@@ -21,9 +21,9 @@ Services communicate with each other through [networks](/reference/compose-file/

Services store and share persistent data into [volumes](/reference/compose-file/volumes.md). The Specification describes such persistent data as a high-level filesystem mount with global options.

-Some services require configuration data that is dependent on the runtime or platform. For this, the Specification defines a dedicated [configs](/reference/compose-file/configs.md) concept. From a service container point of view, configs are comparable to volumes, in that they are files mounted into the container. But the actual definition involves distinct platform resources and services, which are abstracted by this type.
+Some services require configuration data that is dependent on the runtime or platform. For this, the Specification defines a dedicated [configs](/reference/compose-file/configs.md) concept.
From inside the container, configs behave like volumes—they’re mounted as files. However, configs are defined differently at the platform level. -A [secret](/reference/compose-file/secrets.md) is a specific flavor of configuration data for sensitive data that should not be exposed without security considerations. Secrets are made available to services as files mounted into their containers, but the platform-specific resources to provide sensitive data are specific enough to deserve a distinct concept and definition within the Compose specification. +A [secret](/reference/compose-file/secrets.md) is a specific flavor of configuration data for sensitive data that should not be exposed without security considerations. Secrets are made available to services as files mounted into their containers, but the platform-specific resources to provide sensitive data are specific enough to deserve a distinct concept and definition within the Compose Specification. > [!NOTE] > @@ -55,7 +55,9 @@ If you want to reuse other Compose files, or factor out parts of your applicatio ## CLI -The Docker CLI lets you interact with your Docker Compose applications through the `docker compose` command, and its subcommands. Using the CLI, you can manage the lifecycle of your multi-container applications defined in the `compose.yaml` file. The CLI commands enable you to start, stop, and configure your applications effortlessly. +The Docker CLI lets you interact with your Docker Compose applications through the `docker compose` command and its subcommands. If you're using Docker Desktop, the Docker Compose CLI is included by default. + +Using the CLI, you can manage the lifecycle of your multi-container applications defined in the `compose.yaml` file. The CLI commands enable you to start, stop, and configure your applications effortlessly. ### Key commands @@ -77,7 +79,7 @@ If you want to monitor the output of your running containers and debug issues, y $ docker compose logs ``` -To lists all the services along with their current status: +To list all the services along with their current status: ```console $ docker compose ps @@ -101,11 +103,11 @@ Both services communicate with each other on an isolated back-tier network, whil The example application is composed of the following parts: -- 2 services, backed by Docker images: `webapp` and `database` -- 1 secret (HTTPS certificate), injected into the frontend -- 1 configuration (HTTP), injected into the frontend -- 1 persistent volume, attached to the backend -- 2 networks +- Two services, backed by Docker images: `webapp` and `database` +- One secret (HTTPS certificate), injected into the frontend +- One configuration (HTTP), injected into the frontend +- One persistent volume, attached to the backend +- Two networks ```yml services: @@ -148,7 +150,7 @@ networks: back-tier: {} ``` -The `docker compose up` command starts the `frontend` and `backend` services, create the necessary networks and volumes, and injects the configuration and secret into the frontend service. +The `docker compose up` command starts the `frontend` and `backend` services, creates the necessary networks and volumes, and injects the configuration and secret into the frontend service. 
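+
+As a usage sketch, a typical session with this example application looks like the following (the flags shown are standard Compose options, not specific to this file):
+
+```console
+$ docker compose up -d     # start all services in the background
+$ docker compose logs -f   # follow the combined service logs
+$ docker compose down      # stop and remove the containers and networks
+```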
`docker compose ps` provides a snapshot of the current state of your services, making it easy to see which containers are running, their status, and the ports they are using:

@@ -162,6 +164,6 @@ example-backend-1 example/database "docker-entrypoint.s…" backend

## What's next

-- [Quickstart](/manuals/compose/gettingstarted.md)
+- [Try the Quickstart guide](/manuals/compose/gettingstarted.md)
- [Explore some sample applications](/manuals/compose/support-and-feedback/samples-for-compose.md)
- [Familiarize yourself with the Compose Specification](/reference/compose-file/_index.md)
diff --git a/content/manuals/compose/intro/features-uses.md b/content/manuals/compose/intro/features-uses.md
index 1545bd81407b..707b1539d199 100644
--- a/content/manuals/compose/intro/features-uses.md
+++ b/content/manuals/compose/intro/features-uses.md
@@ -1,6 +1,6 @@
 ---
-description: Key benefits and use cases of Docker Compose
-keywords: documentation, docs, docker, compose, orchestration, containers, uses, benefits
+description: Discover the benefits and typical use cases of Docker Compose for containerized application development and deployment
+keywords: docker compose, compose use cases, compose benefits, container orchestration, development environments, testing containers, yaml file
 title: Why use Compose?
 weight: 20
 aliases:
@@ -11,16 +11,14 @@ aliases:

Using Docker Compose offers several benefits that streamline the development, deployment, and management of containerized applications:

-- Simplified control: Docker Compose allows you to define and manage multi-container applications in a single YAML file. This simplifies the complex task of orchestrating and coordinating various services, making it easier to manage and replicate your application environment.
+- Simplified control: Define and manage multi-container apps in one YAML file, streamlining orchestration and replication.

-- Efficient collaboration: Docker Compose configuration files are easy to share, facilitating collaboration among developers, operations teams, and other stakeholders. This collaborative approach leads to smoother workflows, faster issue resolution, and increased overall efficiency.
+- Efficient collaboration: Shareable YAML files support smooth collaboration between developers and operations teams, improving workflows, speeding up issue resolution, and increasing overall efficiency.

- Rapid application development: Compose caches the configuration used to create a container. When you restart a service that has not changed, Compose re-uses the existing containers. Re-using containers means that you can make changes to your environment very quickly.

- Portability across environments: Compose supports variables in the Compose file. You can use these variables to customize your composition for different environments, or different users.

-- Extensive community and support: Docker Compose benefits from a vibrant and active community, which means abundant resources, tutorials, and support. This community-driven ecosystem contributes to the continuous improvement of Docker Compose and helps users troubleshoot issues effectively.
-
## Common use cases of Docker Compose

Compose can be used in many different ways.
Some common use cases are outlined
@@ -67,4 +65,4 @@ For details on using production-oriented features, see

- [Learn about the history of Compose](history.md)
- [Understand how Compose works](compose-application-model.md)
-- [Quickstart](../gettingstarted.md)
+- [Try the Quickstart guide](../gettingstarted.md)
diff --git a/content/manuals/compose/intro/history.md b/content/manuals/compose/intro/history.md
index 51ab5097edb9..a46cc78297ce 100644
--- a/content/manuals/compose/intro/history.md
+++ b/content/manuals/compose/intro/history.md
@@ -1,7 +1,7 @@
 ---
 title: History and development of Docker Compose
 linkTitle: History and development
-description: History of Compose V1 and Compose YAML schema versioning
+description: Explore the evolution of Docker Compose from v1 to v2, including CLI changes, YAML versioning, and the Compose Specification.
 keywords: compose, compose yaml, swarm, migration, compatibility, docker compose vs docker-compose
 weight: 30
 aliases:
@@ -10,41 +10,47 @@ aliases:

This page provides:
 - A brief history of the development of the Docker Compose CLI
-  - A clear explanation of the major versions and file formats that make up Compose V1 and Compose V2
-  - The main differences between Compose V1 and Compose V2
+  - A clear explanation of the major versions and file formats that make up Compose v1 and Compose v2
+  - The main differences between Compose v1 and Compose v2

## Introduction

-![Image showing the main differences between Compose V1 and Compose V2](../images/v1-versus-v2.png)
+![Image showing the main differences between Compose v1 and Compose v2](../images/v1-versus-v2.png)

-The image above shows that the currently supported version of the Docker Compose CLI is Compose V2 which is defined by the [Compose Specification](/reference/compose-file/_index.md).
+The previous image shows that the currently supported version of the Docker Compose CLI is Compose v2, which is defined by the [Compose Specification](/reference/compose-file/_index.md).

It also provides a quick snapshot of the differences in file formats, command-line syntax, and top-level elements. This is covered in more detail in the following sections.

### Docker Compose CLI versioning

Version one of the Docker Compose command-line binary was first released in 2014. It was written in Python, and is invoked with `docker-compose`.
-Typically, Compose V1 projects include a top-level `version` element in the `compose.yml` file, with values ranging from `2.0` to `3.8`, which refer to the specific [file formats](#compose-file-format-versioning).
+Typically, Compose v1 projects include a top-level `version` element in the `compose.yaml` file, with values ranging from `2.0` to `3.8`, which refer to the specific [file formats](#compose-file-format-versioning).

Version two of the Docker Compose command-line binary was announced in 2020, is written in Go, and is invoked with `docker compose`.
-Compose V2 ignores the `version` top-level element in the `compose.yml` file.
+Compose v2 ignores the `version` top-level element in the `compose.yaml` file.

### Compose file format versioning

The Docker Compose CLIs are defined by specific file formats.
-Three major versions of the Compose file format for Compose V1 were released:
+Three major versions of the Compose file format for Compose v1 were released:

- Compose file format 1 with Compose 1.0.0 in 2014
- Compose file format 2.x with Compose 1.6.0 in 2016
- Compose file format 3.x with Compose 1.10.0 in 2017

Compose file format 1 is substantially different from all the following formats as it lacks a top-level `services` key.
-Its usage is historical and files written in this format don't run with Compose V2.
+Its usage is historical and files written in this format don't run with Compose v2.

Compose file format 2.x and 3.x are very similar to each other, but the latter introduced many new options targeted at Swarm deployments.

To address confusion around Compose CLI versioning, Compose file format versioning, and feature parity depending on whether Swarm mode was in use, file format 2.x and 3.x were merged into the [Compose Specification](/reference/compose-file/_index.md).

-Compose V2 uses the Compose Specification for project definition. Unlike the prior file formats, the Compose Specification is rolling and makes the `version` top-level element optional. Compose V2 also makes use of optional specifications - [Deploy](/reference/compose-file/deploy.md), [Develop](/reference/compose-file/develop.md) and [Build](/reference/compose-file/build.md).
+Compose v2 uses the Compose Specification for project definition. Unlike the prior file formats, the Compose Specification is rolling and makes the `version` top-level element optional. Compose v2 also makes use of optional specifications: [Deploy](/reference/compose-file/deploy.md), [Develop](/reference/compose-file/develop.md), and [Build](/reference/compose-file/build.md).

-To make [migration](/manuals/compose/releases/migrate.md) easier, Compose V2 has backwards compatibility for certain elements that have been deprecated or changed between Compose file format 2.x/3.x and the Compose Specification.
+To make [migration](/manuals/compose/releases/migrate.md) easier, Compose v2 has backwards compatibility for certain elements that have been deprecated or changed between Compose file format 2.x/3.x and the Compose Specification.
+
+## What's next?
+
+- [How Compose works](compose-application-model.md)
+- [Compose Specification reference](/reference/compose-file/_index.md)
+- [Migrate from Compose v1 to v2](/manuals/compose/releases/migrate.md)
diff --git a/content/manuals/compose/releases/migrate.md b/content/manuals/compose/releases/migrate.md
index 5732e6349c00..df6305a72ea6 100644
--- a/content/manuals/compose/releases/migrate.md
+++ b/content/manuals/compose/releases/migrate.md
@@ -1,34 +1,34 @@
 ---
-title: Migrate to Compose V2
+linkTitle: Migrate to Compose v2
+title: Migrate from Docker Compose v1 to v2
 weight: 20
-description: How to migrate from Compose V1 to V2
-keywords: compose, upgrade, migration, v1, v2, docker compose vs docker-compose
+description: Step-by-step guidance to migrate from Compose v1 to v2, including syntax differences, environment handling, and CLI changes
+keywords: migrate docker compose, upgrade docker compose v2, docker compose migration, docker compose v1 vs v2, docker compose CLI changes, docker-compose to docker compose
 aliases:
 - /compose/compose-v2/
 - /compose/cli-command-compatibility/
 - /compose/migrate/
 ---

-From July 2023 Compose V1 stopped receiving updates. It’s also no longer available in new releases of Docker Desktop.
+From July 2023, Compose v1 stopped receiving updates.
It’s also no longer available in new releases of Docker Desktop. -Compose V2, which was first released in 2020, is included with all currently supported versions of Docker Desktop. It offers an improved CLI experience, improved build performance with BuildKit, and continued new-feature development. +Compose v2, which was first released in 2020, is included with all currently supported versions of Docker Desktop. It offers an improved CLI experience, improved build performance with BuildKit, and continued new-feature development. -## How do I switch to Compose V2? +## How do I switch to Compose v2? -The easiest and recommended way is to make sure you have the latest version of [Docker Desktop](/manuals/desktop/release-notes.md), which bundles the Docker Engine and Docker CLI platform including Compose V2. +The easiest and recommended way is to make sure you have the latest version of [Docker Desktop](/manuals/desktop/release-notes.md), which bundles the Docker Engine and Docker CLI platform including Compose v2. -With Docker Desktop, Compose V2 is always accessible as `docker compose`. -Additionally, the **Use Compose V2** setting is turned on by default, which provides an alias from `docker-compose`. +With Docker Desktop, Compose v2 is always accessible as `docker compose`. -For manual installs on Linux, you can get Compose V2 by either: +For manual installs on Linux, you can get Compose v2 by either: - [Using Docker's repository](/manuals/compose/install/linux.md#install-using-the-repository) (recommended) - [Downloading and installing manually](/manuals/compose/install/linux.md#install-the-plugin-manually) -## What are the differences between Compose V1 and Compose V2? +## What are the differences between Compose v1 and Compose v2? ### `docker-compose` vs `docker compose` -Unlike Compose V1, Compose V2 integrates into the Docker CLI platform and the recommended command-line syntax is `docker compose`. +Unlike Compose v1, Compose v2 integrates into the Docker CLI platform and the recommended command-line syntax is `docker compose`. The Docker CLI platform provides a consistent and predictable set of options and flags, such as the `DOCKER_HOST` environment variable or the `--context` command-line flag. @@ -37,49 +37,49 @@ For example, `docker --log-level=debug --tls compose up` enables debug logging f > [!TIP] > -> Update scripts to use Compose V2 by replacing the hyphen (`-`) with a space, using `docker compose` instead of `docker-compose`. +> Update scripts to use Compose v2 by replacing the hyphen (`-`) with a space, using `docker compose` instead of `docker-compose`. ### Service container names Compose generates container names based on the project name, service name, and scale/replica count. -In Compose V1, an underscore (`_`) was used as the word separator. -In Compose V2, a hyphen (`-`) is used as the word separator. +In Compose v1, an underscore (`_`) was used as the word separator. +In Compose v2, a hyphen (`-`) is used as the word separator. Underscores aren't valid characters in DNS hostnames. -By using a hyphen instead, Compose V2 ensures service containers can be accessed over the network via consistent, predictable hostnames. +By using a hyphen instead, Compose v2 ensures service containers can be accessed over the network via consistent, predictable hostnames. -For example, running the Compose command `-p myproject up --scale=1 svc` results in a container named `myproject_svc_1` with Compose V1 and a container named `myproject-svc-1` with Compose V2. 
+For example, running the Compose command `-p myproject up --scale=1 svc` results in a container named `myproject_svc_1` with Compose v1 and a container named `myproject-svc-1` with Compose v2.

> [!TIP]
>
->In Compose V2, the global `--compatibility` flag or `COMPOSE_COMPATIBILITY` environment variable preserves the Compose V1 behavior to use underscores (`_`) as the word separator.
-As this option must be specified for every Compose V2 command run, it's recommended that you only use this as a temporary measure while transitioning to Compose V2.
+> In Compose v2, the global `--compatibility` flag or `COMPOSE_COMPATIBILITY` environment variable preserves the Compose v1 behavior to use underscores (`_`) as the word separator.
+As this option must be specified for every Compose v2 command run, it's recommended that you only use this as a temporary measure while transitioning to Compose v2.

### Command-line flags and subcommands

-Compose V2 supports almost all Compose V1 flags and subcommands, so in most cases, it can be used as a drop-in replacement in scripts.
+Compose v2 supports almost all Compose v1 flags and subcommands, so in most cases, it can be used as a drop-in replacement in scripts.

-#### Unsupported in V2
+#### Unsupported in v2

-The following were deprecated in Compose V1 and aren't supported in Compose V2:
+The following were deprecated in Compose v1 and aren't supported in Compose v2:

* `docker-compose scale`. Use `docker compose up --scale` instead.
* `docker-compose rm --all`

-#### Different in V2
+#### Different in v2

-The following behave differently between Compose V1 and V2:
+The following behave differently between Compose v1 and v2:

-|                         | Compose V1                                                       | Compose V2                                                                    |
+|                         | Compose v1                                                       | Compose v2                                                                    |
|-------------------------|------------------------------------------------------------------|-------------------------------------------------------------------------------|
-| `--compatibility`       | Deprecated. Migrates YAML fields based on legacy schema version. | Uses `_` as word separator for container names instead of `-` to match V1.    |
+| `--compatibility`       | Deprecated. Migrates YAML fields based on legacy schema version. | Uses `_` as word separator for container names instead of `-` to match v1.    |
| `ps --filter KEY-VALUE` | Undocumented. Allows filtering by arbitrary service properties.  | Only allows filtering by specific properties, for example `--filter=status=running`. |

### Environment variables

-Environment variable behavior in Compose V1 wasn't formally documented and behaved inconsistently in some edge cases.
+Environment variable behavior in Compose v1 wasn't formally documented and behaved inconsistently in some edge cases.

-For Compose V2, the [Environment variables](/manuals/compose/how-tos/environment-variables/_index.md) section covers both [precedence](/manuals/compose/how-tos/environment-variables/envvars-precedence.md) as well as [`.env` file interpolation](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) and includes many examples covering tricky situations such as escaping nested quotes.
+For Compose v2, the [Environment variables](/manuals/compose/how-tos/environment-variables/_index.md) section covers both [precedence](/manuals/compose/how-tos/environment-variables/envvars-precedence.md) as well as [`.env` file interpolation](/manuals/compose/how-tos/environment-variables/variable-interpolation.md) and includes many examples covering tricky situations such as escaping nested quotes.
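+
+As a quick illustration of the quoting guidance in the tip later in this section, consider a minimal `.env` sketch (the variable names are invented for this example):
+
+```text
+# Single quotes: the value is taken literally, so the $ sign is preserved
+LITERAL_VALUE='this $TOKEN is not interpolated'
+
+# Double quotes: interpolation is applied, so ${TAG} is resolved by Compose
+IMAGE_REF="registry.example.com/app:${TAG}"
+```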
Check if:

- Your project uses multiple levels of environment variable overrides, for example `.env` file and `--env` CLI flags.
@@ -89,17 +89,17 @@ Check if:

> [!TIP]
>
-> Run `docker compose config` on the project to preview the configuration after Compose V2 has performed interpolation to
+> Run `docker compose config` on the project to preview the configuration after Compose v2 has performed interpolation to
verify that values appear as expected.
>
-> Maintaining backwards compatibility with Compose V1 is typically achievable by ensuring that literal values (no
+> Maintaining backwards compatibility with Compose v1 is typically achievable by ensuring that literal values (no
interpolation) are single-quoted and values that should have interpolation applied are double-quoted.

-## What does this mean for my projects that use Compose V1?
+## What does this mean for my projects that use Compose v1?

-For most projects, switching to Compose V2 requires no changes to the Compose YAML or your development workflow.
+For most projects, switching to Compose v2 requires no changes to the Compose YAML or your development workflow.

-It's recommended that you adapt to the new preferred way of running Compose V2, which is to use `docker compose` instead of `docker-compose`.
+It's recommended that you adapt to the new preferred way of running Compose v2, which is to use `docker compose` instead of `docker-compose`.
This provides additional flexibility and removes the requirement for a `docker-compose` compatibility alias.

However, Docker Desktop continues to support a `docker-compose` alias to redirect commands to `docker compose` for convenience and improved compatibility with third-party tools and scripts.

@@ -108,28 +108,28 @@ However, Docker Desktop continues to support a `docker-compose` alias to redirect

### Migrating running projects

-In both V1 and V2, running `up` on a Compose project recreates service containers as necessary to reach the desired state based on comparing the actual state in the Docker Engine to the resolved project configuration including Compose YAML, environment variables, and command-line flags.
+In both v1 and v2, running `up` on a Compose project recreates service containers as needed. It compares the actual state in the Docker Engine to the resolved project configuration, which includes the Compose YAML, environment variables, and command-line flags.

-Because Compose V1 and V2 [name service containers differently](#service-container-names), running `up` using V2 the first time on a project with running services originally launched by V1, results in service containers being recreated with updated names.
+Because Compose v1 and v2 [name service containers differently](#service-container-names), running `up` using v2 the first time on a project with running services originally launched by v1 results in service containers being recreated with updated names.

-Note that even if `--compatibility` flag is used to preserve the V1 naming style, Compose still needs to recreate service containers originally launched by V1 the first time `up` is run by V2 to migrate the internal state.
+Note that even if the `--compatibility` flag is used to preserve the v1 naming style, Compose still needs to recreate service containers originally launched by v1 the first time `up` is run by v2 to migrate the internal state.
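+
+For example, both of the following invocations preserve the v1 underscore naming during a transition period (a usage sketch; `myproject` is a placeholder project name):
+
+```console
+$ docker compose --compatibility -p myproject up -d
+$ COMPOSE_COMPATIBILITY=true docker compose -p myproject up -d
+```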
-### Using Compose V2 with Docker-in-Docker +### Using Compose v2 with Docker-in-Docker -Compose V2 is now included in the [Docker official image on Docker Hub](https://hub.docker.com/_/docker). +Compose v2 is now included in the [Docker official image on Docker Hub](https://hub.docker.com/_/docker). -Additionally, a new [docker/compose-bin image on Docker Hub](https://hub.docker.com/r/docker/compose-bin) packages the latest version of Compose V2 for use in multi-stage builds. +Additionally, a new [docker/compose-bin image on Docker Hub](https://hub.docker.com/r/docker/compose-bin) packages the latest version of Compose v2 for use in multi-stage builds. -## Can I still use Compose V1 if I want to? +## Can I still use Compose v1 if I want to? -Yes. You can still download and install Compose V1 packages, but you won't get support from Docker if anything breaks. +Yes. You can still download and install Compose v1 packages, but you won't get support from Docker if anything breaks. >[!WARNING] > -> The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk. +> The final Compose v1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk. ## Additional Resources -- [docker-compose V1 on PyPI](https://pypi.org/project/docker-compose/1.29.2/) -- [docker/compose V1 on Docker Hub](https://hub.docker.com/r/docker/compose) -- [docker-compose V1 source on GitHub](https://github.com/docker/compose/releases/tag/1.29.2) +- [docker-compose v1 on PyPI](https://pypi.org/project/docker-compose/1.29.2/) +- [docker/compose v1 on Docker Hub](https://hub.docker.com/r/docker/compose) +- [docker-compose v1 source on GitHub](https://github.com/docker/compose/releases/tag/1.29.2) diff --git a/content/manuals/compose/releases/release-notes.md b/content/manuals/compose/releases/release-notes.md index 3cf8874e34f4..1e683a1dc083 100644 --- a/content/manuals/compose/releases/release-notes.md +++ b/content/manuals/compose/releases/release-notes.md @@ -13,6 +13,269 @@ aliases: For more detailed information, see the [release notes in the Compose repo](https://github.com/docker/compose/releases/). 
+## 2.38.2
+
+{{< release-date date="2025-07-08" >}}
+
+### Bug fixes and enhancements
+
+- Added `--networks` flag to `config` command to list networks
+- Fixed an issue with the `down` command with Docker Model Runner used as a provider service
+- Fixed a display issue with Docker Model Runner progress
+- Fixed an issue where services with a profile were missing secrets
+
+### Update
+
+- Dependencies upgrade: bump docker engine and cli to v28.3.1
+- Dependencies upgrade: bump buildkit to v0.23.2
+- Dependencies upgrade: bump golang to v1.23.10
+
+## 2.38.1
+
+{{< release-date date="2025-06-30" >}}
+
+### Bug fixes and enhancements
+
+- Added support for `model_variable` for service `models` configuration
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.7.1
+
+## 2.38.0
+
+{{< release-date date="2025-06-30" >}}
+
+### Bug fixes and enhancements
+
+- Introduced support for `models` for LLM configuration
+- Added `volumes` command
+- Removed `publish` limitation on bind mounts
+- Fixed an issue mounting the Docker socket to containers that don't need it
+- Fixed an issue with Bake hanging on output
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.7.0
+- Dependencies upgrade: bump docker engine and cli to v28.3.0
+
+## 2.37.3
+
+{{< release-date date="2025-06-24" >}}
+
+### Bug fixes and enhancements
+
+- Added support for `cache_to` for Bake
+- Fixed an issue with Bake integration
+- Fixed multiple issues affecting the `run` command
+
+### Update
+
+- Dependencies upgrade: bump buildkit to v0.23.1
+
+## 2.37.2
+
+{{< release-date date="2025-06-20" >}}
+
+### Bug fixes and enhancements
+
+- Introduced `use_api_socket`
+- Fixed `compose images` JSON output format
+- Fixed a panic when using the `w` shortcut on a project without watch support
+- Fixed a permission issue with bake metadata files on Windows
+- Fixed a panic error on provider service startup
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.6.5
+- Dependencies upgrade: bump buildx to v0.25.0
+- Dependencies upgrade: bump buildkit to v0.23.0
+
+## 2.37.1
+
+{{< release-date date="2025-06-12" >}}
+
+### Bug fixes and enhancements
+
+- Fixed a permission issue with bake metadata files on Windows
+- Fixed a panic error on provider service startup
+- Reverted `compose images` JSON output to array format
+
+## 2.37.0
+
+{{< release-date date="2025-06-05" >}}
+
+### Bug fixes and enhancements
+
+- Fixed an issue with random port allocation
+- Fixed an issue recreating containers when not needed during inner loop
+- Fixed a problem during `up --build` with `additional_context`
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.6.4
+- Dependencies upgrade: bump buildx to v0.24.0
+- Dependencies upgrade: bump buildkit to v0.22.0
+
+## 2.36.2
+
+{{< release-date date="2025-05-23" >}}
+
+### Bug fixes and enhancements
+
+- Compose Bridge features are now part of Compose
+- Improved display of the `docker compose images` command
+- Promoted `bake` as the default build tool for Compose
+- Fixed issues around build flow
+- Fixed the restart of dependent services after `watch` rebuilds images
+
+### Update
+
+- Dependencies upgrade: bump docker engine and cli to v28.2.2
+
+## 2.36.1
+
+{{< release-date date="2025-05-19" >}}
+
+### Bug fixes and enhancements
+
+- Introduced support for arrays for the `provider` service `options` attribute
+- Added `debug` messages in the extension protocol
+- Fixed an issue when trying to publish a Compose application with a `provider` service
+- Fixed build issues on Compose applications with
`service.provider`
+- Introduced `--lock-image-digests` to the `config` command
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.6.3
+- Dependencies upgrade: bump containerd to 2.1.0
+
+## 2.36.0
+
+{{< release-date date="2025-05-07" >}}
+
+### Bug fixes and enhancements
+
+- Introduced `networks.interface_name`
+- Added support for `COMPOSE_PROGRESS` env variable
+- Added `service.provider` to external binaries
+- Introduced build `--check` flag
+- Fixed multiple panic issues when parsing Compose files
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.6.2
+- Dependencies upgrade: bump docker engine and cli to v28.1.0
+- Dependencies upgrade: bump containerd to 2.0.5
+- Dependencies upgrade: bump buildkit to v0.21.1
+
+## 2.35.1
+
+{{< release-date date="2025-04-17" >}}
+
+### Bug fixes and enhancements
+
+- Fixed an issue with bind mounts
+
+### Update
+
+- Dependencies upgrade: bump compose-go to v2.6.0
+- Dependencies upgrade: bump docker engine and cli to v28.0.4
+- Dependencies upgrade: bump buildx to v0.22.0
+
+## 2.35.0
+
+{{< release-date date="2025-04-10" >}}
+
+### Bug fixes and enhancements
+
+- Added support for [Docker Model Runner](/manuals/ai/model-runner.md) to easily integrate AI models into your Compose applications
+- Added `build --print` command to help debug complex build configurations by showing the equivalent bake file
+- Added `volume.type=image` to provide more flexible volume management for container images
+- Added the `--quiet` option to the `run` command for cleaner output when running containers
+- Added `config --no-env-resolution` option to view raw configuration without environment variable substitution
+- Fixed behavior of `depends_on` to prevent unnecessary container recreation when dependencies change
+- Fixed support for secrets defined by environment variables when using `include`
+- Fixed volume mount handling to ensure bind mounts work correctly in all scenarios
+
+### Update
+
+- Dependencies upgrade: bump docker engine and cli to v28.1.0
+- Dependencies upgrade: bump buildx to v0.23.0
+- Dependencies upgrade: bump buildkit to v0.21.0
+
+## 2.34.0
+
+{{< release-date date="2025-03-14" >}}
+
+### Bug fixes and enhancements
+
+- Added support for refresh `pull_policy` values `daily`, `weekly`, and `every_`
+- Introduced `include` attribute to `watch` definition to match file patterns
+- Introduced the `--env-from-file` flag for the `docker compose run` command
+- Promoted `publish` as a regular command of Compose
+- Fixed a bug by loading `env_file` after services have been selected
+
+### Update
+
+- Dependencies upgrade: bump docker engine and cli to v28.0.1
+- Dependencies upgrade: bump buildkit to v0.17.1
+- Dependencies upgrade: Bump compose-go v2.4.9
+- Dependencies upgrade: Bump buildx v0.21.2
+
+## 2.33.1
+
+{{< release-date date="2025-02-21" >}}
+
+### Bug fixes and enhancements
+
+- Added support for `gw_priority` and `enable_ipv4` (requires Docker v28.0)
+- Fixed an issue with the navigation menu
+- Improved error message when using non-file secret/config with read-only service
+
+### Update
+
+- Dependencies upgrade: bump docker engine and cli to v28.0.0
+
+## 2.33.0
+
+{{< release-date date="2025-02-13" >}}
+
+### Bug fixes and enhancements
+
+- Introduced a hint to promote the use of [Bake](/build/bake/)
+- Introduced support for the `additional_context` attribute referencing another service
+- Added support for `BUILDKIT_PROGRESS`
+- Compose now warns you when a published Compose application includes environment variables
+- Added a `--with-env` flag to publish a Compose application with environment variables +- Updated `ls --quiet` help description +- Fixed multiple issues delegating build to Bake +- Updated help in `stats` command +- Fixed support for "builtin" seccomp profile +- Fixed support for `watch` with multiple services +- Removed exit code per error type used by legacy metrics system +- Fixed test coverage for `compatibility` +- Removed raw os.Args sent to OpenTelemetry +- Enabled copyloopvar linter +- Fixed provenance for binaries and generate SBOM +- Main branch for docs upstream validation is now used +- Added codeowners file +- Added Docker Engine v28.x to the test-matrix + +### Update + +- Dependencies upgrade: Bump compose-go v2.4.8 +- Dependencies upgrade: Bump buildx v0.20.1 +- Dependencies upgrade: Bump docker to v27.5.1 +- Dependencies upgrade: Bump golangci-lint to v1.63.4 +- Dependencies upgrade: Bump golang.org/x/sys from 0.28.0 to 0.30.0 +- Dependencies upgrade: Bump github.com/moby/term v0.5.2 +- Dependencies upgrade: Bump github.com/otiai10/copy from 1.14.0 to 1.14.1 +- Dependencies upgrade: Bump github.com/jonboulle/clockwork from 0.4.0 to 0.5.0 +- Dependencies upgrade: Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 +- Dependencies upgrade: Bump golang.org/x/sync from 0.10.0 to 0.11.0 +- Dependencies upgrade: Bump gotest.tools/v3 from 3.5.1 to 3.5.2 + ## 2.32.4 {{< release-date date="2025-01-16" >}} @@ -126,7 +389,7 @@ For more detailed information, see the [release notes in the Compose repo](https ### Bug fixes and enhancements -- Fixed an issue re-creating services when updating its profiles +- Fixed an issue re-creating services when updating its profiles - Fixed a regression when using the same YAML anchor multiple times in a Compose file ## 2.30.1 @@ -199,7 +462,7 @@ For more detailed information, see the [release notes in the Compose repo](https ### Bug fixes and enhancements -- Fixed an issue with services not stopping when restarting diverged dependencies. +- Fixed an issue with services not stopping when restarting diverged dependencies. - Fixed potential `nil` pointer error on the OTEL client. ## 2.29.3 @@ -213,7 +476,7 @@ For more detailed information, see the [release notes in the Compose repo](https ### Bug fixes and enhancements -- Combination of bind mount and `rebuild` are now allowed with `watch`. +- Combination of bind mount and `rebuild` are now allowed with `watch`. - Fixed a bug recreating containers when `--no-deps` is used with `up`. - Fixed a bug not closing streams when reattaching containers. - Restored recreation of anonymous volumes when using `-V` or `--renew-anon-volumes`. @@ -236,7 +499,7 @@ For more detailed information, see the [release notes in the Compose repo](https - Fixed the docs on `docker compose kill` usage. - Fixed redundant condition from `toAPIBuildOptions` in build.go. -- Fixed initial Watch `sync` after Compose restarts with introduction of `x-initSync`. +- Fixed initial Watch `sync` after Compose restarts with introduction of `x-initialSync`. - Fixed an issue which stopped the Compose process for a single container on `sync-restart` Watch action. ## 2.29.1 @@ -412,8 +675,8 @@ For more detailed information, see the [release notes in the Compose repo](https ### Bug fixes and enhancements -- Compose now ensures stable priority sort order for networks -- Fixed interpolation with curly braces (e.g. JSON) in default values +- Compose now ensures stable priority sort order for networks +- Fixed interpolation with curly braces (e.g. 
JSON) in default values - Fixed validation for non-unique `container_name` values - Fixed validation for `develop.watch` - Fixed environment loading for `include` @@ -508,15 +771,15 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C - Dependencies upgrade: bump cli to 25.0.0-beta.3 - Dependencies upgrade: bump compose-go to 2.0.0-beta.3 -- Dependencies upgrade: bump golang to 1.21.6 +- Dependencies upgrade: bump golang to 1.21.6 ### Bug fixes and enhancements - Introduced `docker compose attach` to attach local standard input, output, and error streams to a service's running container. - Introduced `docker compose stats` to display a live stream of container(s) resource usage statistics. - Introduced `docker compose ps --orphans` to include/exclude services not declared. -- Introduced `docker compose logs --index` to select a replica container. -- Introduced `docker compose build --with-dependencies` to also build dependencies. +- Introduced `docker compose logs --index` to select a replica container. +- Introduced `docker compose build --with-dependencies` to also build dependencies. - Added source policies for build. - Included disabled services for shell completion. - Restored `Project` in ps JSON output. @@ -543,9 +806,9 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C ### Update -- Dependencies upgrade: bump buildkit 0.12.3 -- Dependencies upgrade: bump docker 24.0.7 -- Dependencies upgrade: bump cli 24.0.7 +- Dependencies upgrade: bump buildkit 0.12.3 +- Dependencies upgrade: bump docker 24.0.7 +- Dependencies upgrade: bump cli 24.0.7 - Dependencies upgrade: bump 1.20.2 ### Bug fixes and enhancements @@ -569,7 +832,7 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C - Introduced `--resolve-image-digests` so users can seal service images by digest when publishing a Compose application. - Improved Compose Watch configuration logging. - Compose now rejects a Compose file using `secrets|configs.driver` or `template_driver`. -- Compose now fails to start if a dependency is missing. +- Compose now fails to start if a dependency is missing. - Fixed SIGTERM support to stop/kill stack. - Fixed a `--hash` regression. - Fixed "Application failed to start after update" when an external network is on a watched service. @@ -703,7 +966,7 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C * Fixed a DryRun mode issue when initializing CLI client. * Fixed a bug with random missing network when a service has more than one. * Fixed the Secrets file permission value to comply with the Compose Specification. -* Fixed an issue about `no-deps` flag not being applied. +* Fixed an issue about `no-deps` flag not being applied. * Fixed some source code comments. * Fixed a bug when `--index` is not set select. * Fixed a process leak in the wait e2e test. @@ -741,7 +1004,7 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C - Introduced `run --cap-add` to run maintenance commands using service image. - Fixed a bug during detection of swarm mode. - Fixed a bug when setting the project name via `COMPOSE_PROJECT_NAME` environment variable. -- Adjusted the display of the volumes flag with the help of `down` command. +- Adjusted the display of the volumes flag with the help of `down` command. - Fixed a bug in the `up` command which should not silently ignore missing `depends_on` services. 
- Aligned forward signal to container behaviour with the `docker run` one. - Compose now detects network name conflict. @@ -1000,10 +1263,10 @@ This release fixes a build issue with Docker Desktop for Windows introduced in C - Fixed race condition when collecting pulled images IDs. Fixed [compose#9897](https://github.com/docker/compose/pull/9897) - Compose doesn't stop the `pull` command for images that can be built. Fixed [compose#8724](https://github.com/docker/compose/pull/8724) - Fixed corner case when there's no container to attach to. Fixed [compose#8752](https://github.com/docker/compose/pull/8752) -- Compose containers' startup must run sequentially for engine to assign distinct ports within a configured range. Fixed +- Compose containers' startup must run sequentially for engine to assign distinct ports within a configured range. Fixed [compose#8530](https://github.com/docker/compose/pull/8530) - Fixed parsing of `repository:tag`. Fixed [compose#9208](https://github.com/docker/compose/pull/9208) -- Load project from files when explicitly set by user. Fixed [compose#9554](https://github.com/docker/compose/pull/9554) +- Load project from files when explicitly set by user. Fixed [compose#9554](https://github.com/docker/compose/pull/9554) ## 2.14.0 @@ -1271,7 +1534,7 @@ In this release, Docker Compose recreates new resources (networks, volumes, secr ### Bug fixes and enhancements - Fixed interpolation error message output. Fixes [compose-spec/compose-go#292](https://github.com/compose-spec/compose-go/pull/292). -- Defined precedence of the environment variables evaluation. Fixes [compose#9521](https://github.com/docker/compose/issues/9606), +- Defined precedence of the environment variables evaluation. Fixes [compose#9521](https://github.com/docker/compose/issues/9606), [compose#9638](https://github.com/docker/compose/issues/9638), [compose#9608](https://github.com/docker/compose/issues/9608), [compose#9578](https://github.com/docker/compose/issues/9578). @@ -1293,7 +1556,7 @@ For the full change log or additional information, check the [Compose repository ### Updates -- Dependencies upgrade: bumped [go to 1.18.4](https://github.com/golang/go/compare/go1.18.3...go1.18.4). +- Dependencies upgrade: bumped [go to 1.18.4](https://github.com/golang/go/compare/go1.18.3...go1.18.4). - Dependencies upgrade: bumped [compose-go to v1.2.9](https://github.com/compose-spec/compose-go/releases/tag/v1.2.9). ### Bug fixes and enhancements @@ -1376,7 +1639,7 @@ For the full change log or additional information, check the [Compose repository - Dependencies update: bumping [compose-go to 1.2.6](https://github.com/compose-spec/compose-go/releases/tag/v1.2.6). - Dependencies update: bumping [compose-go to 1.2.7](https://github.com/compose-spec/compose-go/releases/tag/v1.2.7). -- Dependencies update: bumping [golang to 1.18](https://go.dev/doc/devel/release#go1.18). +- Dependencies update: bumping [golang to 1.18](https://go.dev/doc/devel/release#go1.18). ### Bug fixes and enhancements @@ -1439,10 +1702,10 @@ For the full change log or additional information, check the [Compose repository - Added ssh config to the build options when building an image from a `docker compose up` command. Fixes [#9338](https://github.com/docker/compose/issues/9338). - Added inspection to container checking if a TTY is required. Running services with `tty:true` specified now show console output. Fixes [#9288](https://github.com/docker/compose/issues/9288). 
-For the full change log or additional information, check the [Compose repository 2.4.1 release page](https://github.com/docker/compose/releases/tag/v2.4.1). +For the full change log or additional information, check the [Compose repository 2.4.1 release page](https://github.com/docker/compose/releases/tag/v2.4.1). -## 2.4.0 +## 2.4.0 {{< release-date date="2022-04-1" >}} @@ -1464,10 +1727,10 @@ For the full change log or additional information, check the [Compose repository - Removed code regarding an obsolete warning. - Vendor: github.com/containerd/containerd v1.6.2. Includes a fix for CVE-2022-24769 (doesn't affect our codebase). -For the full change log or additional information, check the [Compose repository 2.4.0 release page](https://github.com/docker/compose/releases/tag/v2.4.0). +For the full change log or additional information, check the [Compose repository 2.4.0 release page](https://github.com/docker/compose/releases/tag/v2.4.0). -## 2.3.4 +## 2.3.4 {{< release-date date="2022-03-25" >}} @@ -1479,7 +1742,7 @@ For the full change log or additional information, check the [Compose repository - Removed a container with no candidate now produces a warning instead of an error. Fixes [#9255](https://github.com/docker/compose/issues/9255). - Removed the "Deprecated" mentions from -i and -t options to run and exec commands. These options are on by default and in use. Fixes [#9229](https://github.com/docker/compose/pull/9229#discussion_r819730788). -- Removed the "Deprecated" mention from the --filter flag, to keep consistency with other commands. +- Removed the "Deprecated" mention from the --filter flag, to keep consistency with other commands. - Removed the need to get the original compose.yaml file to run 'docker compose kill'. ### Updates @@ -1495,9 +1758,9 @@ For the full change log or additional information, check the [Compose repository Fixes [#9172](https://github.com/docker/compose/issues/9172), [#9145](https://github.com/docker/compose/issues/9145). - Changed Compose API reference docs automation to pick up diffs code vs. docs. -For the full change log or additional information, check the [Compose repository 2.3.4 release page](https://github.com/docker/compose/releases/tag/v2.3.4). +For the full change log or additional information, check the [Compose repository 2.3.4 release page](https://github.com/docker/compose/releases/tag/v2.3.4). -## Other Releases +## Other Releases (2022-03-8 to 2022-04-14) @@ -1629,7 +1892,7 @@ For a list of PRs and issues fixed in this release, see [Compose 1.28.3](https:/ - CI setup update -## 1.28.0 +## 1.28.0 (2021-01-20) diff --git a/content/manuals/compose/support-and-feedback/faq.md b/content/manuals/compose/support-and-feedback/faq.md index 4e26cb73841e..106f16af2f3f 100644 --- a/content/manuals/compose/support-and-feedback/faq.md +++ b/content/manuals/compose/support-and-feedback/faq.md @@ -1,7 +1,7 @@ --- -description: Frequently asked questions for Docker Compose -keywords: documentation, docs, docker, compose, faq, docker compose vs docker-compose -title: Compose FAQs +description: Answers to common questions about Docker Compose, including v1 vs v2, commands, shutdown behavior, and development setup. 
+keywords: docker compose faq, docker compose questions, docker-compose vs docker compose, docker compose json, docker compose stop delay, run multiple docker compose
+title: Frequently asked questions about Docker Compose
linkTitle: FAQs
weight: 10
tags: [FAQ]
@@ -11,16 +11,16 @@ aliases:

### What is the difference between `docker compose` and `docker-compose`?

-Version one of the Docker Compose command-line binary was first released in 2014. It was written in Python, and is invoked with `docker-compose`. Typically, Compose V1 projects include a top-level version element in the compose.yml file, with values ranging from 2.0 to 3.8, which refer to the specific file formats.
+Version one of the Docker Compose command-line binary was first released in 2014. It was written in Python and is invoked with `docker-compose`. Typically, Compose v1 projects include a top-level `version` element in the `compose.yaml` file, with values ranging from 2.0 to 3.8, which refer to the specific file formats.

-Version two of the Docker Compose command-line binary was announced in 2020, is written in Go, and is invoked with `docker compose`. Compose V2 ignores the version top-level element in the compose.yml file.
+Version two of the Docker Compose command-line binary was announced in 2020, is written in Go, and is invoked with `docker compose`. Compose v2 ignores the `version` top-level element in the `compose.yaml` file.

For further information, see [History and development of Compose](/manuals/compose/intro/history.md).

### What's the difference between `up`, `run`, and `start`?

Typically, you want `docker compose up`. Use `up` to start or restart all the
-services defined in a `compose.yml`. In the default "attached"
+services defined in a `compose.yaml`. In the default "attached"
mode, you see all the logs from all the containers. In "detached" mode (`-d`),
Compose exits after starting the containers, but the containers continue to run
in the background.

@@ -91,7 +91,7 @@ any JSON file should be valid YAML. To use a JSON file with Compose,
specify the filename to use, for example:

```console
-$ docker compose -f docker-compose.json up
+$ docker compose -f compose.json up
```

### Should I include my code with `COPY`/`ADD` or a volume?

diff --git a/content/manuals/compose/support-and-feedback/feedback.md b/content/manuals/compose/support-and-feedback/feedback.md
index bfc6e06ae637..494e821488b7 100644
--- a/content/manuals/compose/support-and-feedback/feedback.md
+++ b/content/manuals/compose/support-and-feedback/feedback.md
@@ -9,16 +9,10 @@ aliases:

There are many ways you can provide feedback on Docker Compose.

-### In-product feedback
-
-If you have obtained Docker Compose through Docker Desktop, you can use the `docker feedback` command to submit feedback directly from the command line.
-
-
-
### Report bugs or problems on GitHub

To report bugs or problems, visit [Docker Compose on GitHub](https://github.com/docker/compose/issues).

### Feedback via Community Slack channels

-You can also provide feedback through the #docker-compose [Docker Community Slack](https://dockr.ly/comm-slack) channel.
+You can also provide feedback through the `#docker-compose` [Docker Community Slack](https://dockr.ly/comm-slack) channel.
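When reporting a bug, it helps to include the exact Compose version you're running. A minimal check (the version string shown here is only illustrative; yours will differ):

```console
$ docker compose version
Docker Compose version v2.24.0
```

If only the hyphenated `docker-compose` command exists on your system, you're likely still using the standalone v1 binary described in the FAQ above.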
diff --git a/content/manuals/compose/support-and-feedback/samples-for-compose.md b/content/manuals/compose/support-and-feedback/samples-for-compose.md index 39f42152d573..cbeb84380e40 100644 --- a/content/manuals/compose/support-and-feedback/samples-for-compose.md +++ b/content/manuals/compose/support-and-feedback/samples-for-compose.md @@ -17,30 +17,12 @@ if you have not already done so. The samples should help you to: - Define services based on Docker images using - [Compose files](/reference/compose-file/_index.md): `compose.yml` and - `docker-stack.yml` -- Understand the relationship between `compose.yml` and + [Compose files](/reference/compose-file/_index.md) +- Understand the relationship between `compose.yaml` and [Dockerfiles](/reference/dockerfile/) - Learn how to make calls to your application services from Compose files - Learn how to deploy applications and services to a [swarm](/manuals/engine/swarm/_index.md) -## Samples tailored to demo Compose - -These samples focus specifically on Docker Compose: - -- [Quickstart: Compose and ELK](https://github.com/docker/awesome-compose/tree/master/elasticsearch-logstash-kibana/README.md) - Shows - how to use Docker Compose to set up and run ELK - Elasticsearch-Logstash-Kibana. - -- [Quickstart: Compose and Django](https://github.com/docker/awesome-compose/tree/master/official-documentation-samples/django/README.md) - Shows - how to use Docker Compose to set up and run a simple Django/PostgreSQL app. - -- [Quickstart: Compose and Rails](https://github.com/docker/awesome-compose/tree/master/official-documentation-samples/rails/README.md) - Shows - how to use Docker Compose to set up and run a Rails/PostgreSQL app. - -- [Quickstart: Compose and WordPress](https://github.com/docker/awesome-compose/tree/master/official-documentation-samples/wordpress/README.md) - Shows - how to use Docker Compose to set up and run WordPress in an isolated - environment with Docker containers. - ## Awesome Compose samples The Awesome Compose samples provide a starting point on how to integrate different frameworks and technologies using Docker Compose. All samples are available in the [Awesome-compose GitHub repo](https://github.com/docker/awesome-compose) and are ready to run with `docker compose up`. diff --git a/content/manuals/desktop/_index.md b/content/manuals/desktop/_index.md index 17c87b9cfcd5..4162dcd39500 100644 --- a/content/manuals/desktop/_index.md +++ b/content/manuals/desktop/_index.md @@ -15,10 +15,14 @@ grid: [Windows](/desktop/setup/install/windows-install/), or [Linux](/desktop/setup/install/linux/). icon: download -- title: Explore Docker Desktop - description: Navigate Docker Desktop and learn about its key features. +- title: Learn about Docker Desktop + description: Navigate Docker Desktop. icon: feature_search link: /desktop/use-desktop/ +- title: Explore its key features + description: | + Find information about [Docker VMM](/desktop/features/vmm/), [WSL](/desktop/features/wsl/), [deploying on Kubernetes](/desktop/features/kubernetes/), and more. + icon: category - title: View the release notes description: Find out about new features, improvements, and bug fixes. icon: note_add @@ -27,11 +31,6 @@ grid: description: Explore general FAQs or FAQs for specific platforms. icon: help link: /desktop/troubleshoot-and-support/faqs/general/ -- title: Find additional resources - description: | - Find information on networking features, deploying on Kubernetes, and more. 
- icon: category - link: /desktop/features/kubernetes/ - title: Give feedback description: Provide feedback on Docker Desktop or Docker Desktop features. icon: sms @@ -51,18 +50,22 @@ It provides a straightforward GUI (Graphical User Interface) that lets you manag Docker Desktop reduces the time spent on complex setups so you can focus on writing code. It takes care of port mappings, file system concerns, and other default settings, and is regularly updated with bug fixes and security updates. +Docker Desktop integrates with your preferred development tools and languages, and gives you access to a vast ecosystem of trusted images and templates via Docker Hub. This empowers teams to accelerate development, automate builds, enable CI/CD workflows, and collaborate securely through shared repositories. + {{< tabs >}} {{< tab name="What's included in Docker Desktop?" >}} - [Docker Engine](/manuals/engine/_index.md) - Docker CLI client -- [Docker Scout](../scout/_index.md) (additional subscription may apply) +- [Docker Scout](../scout/_index.md) - [Docker Build](/manuals/build/_index.md) -- [Docker Extensions](../extensions/_index.md) - [Docker Compose](/manuals/compose/_index.md) +- [Ask Gordon](/manuals/ai/gordon/_index.md) +- [Docker Extensions](../extensions/_index.md) - [Docker Content Trust](/manuals/engine/security/trust/_index.md) - [Kubernetes](https://github.com/kubernetes/kubernetes/) - [Credential Helper](https://github.com/docker/docker-credential-helpers/) +- [Docker Offload](/manuals/offload/_index.md) {{< /tab >}} {{< tab name="What are the key features of Docker Desktop?">}} @@ -78,10 +81,4 @@ Docker Desktop reduces the time spent on complex setups so you can focus on writ {{< /tab >}} {{< /tabs >}} -Docker Desktop works with your choice of development tools and languages and -gives you access to a vast library of certified images and templates in -[Docker Hub](https://hub.docker.com/). This allows development teams to extend -their environment to rapidly auto-build, continuously integrate, and collaborate -using a secure repository. - {{< grid >}} diff --git a/content/manuals/desktop/enterprise/_index.md b/content/manuals/desktop/enterprise/_index.md index ccd1d127952b..28de27c22b9d 100644 --- a/content/manuals/desktop/enterprise/_index.md +++ b/content/manuals/desktop/enterprise/_index.md @@ -18,6 +18,6 @@ aliases: Docker Desktop Enterprise (DDE) has been deprecated and is no longer in active development. Please use [Docker Desktop](../_index.md) Community instead. -If you are an existing DDE customer, use our [Support form](https://hub.docker.com/support/desktop/) to request a transition to one of our new [subscription plans](https://www.docker.com/pricing). +If you are an existing DDE customer, use our [Support form](https://hub.docker.com/support/desktop/) to request a transition to one of our new [subscriptions](https://www.docker.com/pricing). If you are looking to deploy Docker Desktop at scale, contact us on [pricingquestions@docker.com](mailto:pricingquestions@docker.com). 
diff --git a/content/manuals/desktop/features/containerd.md b/content/manuals/desktop/features/containerd.md index e78c4448323a..34a0b081ed3a 100644 --- a/content/manuals/desktop/features/containerd.md +++ b/content/manuals/desktop/features/containerd.md @@ -1,6 +1,6 @@ --- title: containerd image store -weight: 10 +weight: 80 description: How to activate the containerd integration feature in Docker Desktop keywords: Docker, containerd, engine, image store, lazy-pull toc_max: 3 @@ -8,37 +8,31 @@ aliases: - /desktop/containerd/ --- -This page provides information about the ongoing integration of `containerd` for -image and file system management in the Docker Engine. +Docker Desktop is transitioning to use containerd for image and filesystem management. This page outlines the benefits, setup process, and new capabilities enabled by the containerd image store. > [!NOTE] > -> Images and containers are not shared between the classic image store and the -> new containerd image store. When you switch image stores, containers and -> images from the inactive store remain but are hidden until you switch back. +> Docker Desktop maintains separate image stores for the classic and containerd image stores. +> When switching between them, images and containers from the inactive store remain on disk but are hidden until you switch back. -## What is containerd? +## What is `containerd`? -`containerd` is an abstraction of the low-level kernel features -used to run and manage containers on a system. -It's a platform used in container software like Docker and Kubernetes. +`containerd` is a container runtime that provides a lightweight, consistent interface for container lifecycle management. It is already used under the hood by Docker Engine for creating, starting, and stopping containers. -Docker Engine already uses `containerd` for container lifecycle management, -which includes creating, starting, and stopping containers. -This page describes the next step of the containerd integration for Docker: -the containerd image store. +Docker Desktop’s ongoing integration of containerd now extends to the image store, offering more flexibility and modern image support. -## Image store +## What is the `containerd` image store? The image store is the component responsible for pushing, pulling, and storing images on the filesystem. + The classic Docker image store is limited in the types of images that it supports. For example, it doesn't support image indices, containing manifest lists. When you create multi-platform images, for example, the image index resolves all the platform-specific variants of the image. An image index is also required when building images with attestations. -The containerd image store extends range of image types +The containerd image store extends the range of image types that the Docker Engine can natively interact with. While this is a low-level architectural change, it's a prerequisite for unlocking a range of new use cases, including: @@ -65,7 +59,7 @@ To manually enable this feature in Docker Desktop: 1. Navigate to **Settings** in Docker Desktop. 2. In the **General** tab, check **Use containerd for pulling and storing images**. -3. Select **Apply & Restart**. +3. Select **Apply**. To disable the containerd image store, clear the **Use containerd for pulling and storing images** checkbox. @@ -88,8 +82,4 @@ and load them to your local image store: -## Feedback -Thanks for trying the new features available with `containerd`. 
Give feedback or
-report any bugs you may find through the issues tracker on the
-[feedback form](https://dockr.ly/3PODIhD).
diff --git a/content/manuals/desktop/features/desktop-cli.md b/content/manuals/desktop/features/desktop-cli.md
index b2d8524f27bb..798009755976 100644
--- a/content/manuals/desktop/features/desktop-cli.md
+++ b/content/manuals/desktop/features/desktop-cli.md
@@ -1,23 +1,18 @@
---
-title: Using the Docker Desktop CLI
+title: Use the Docker Desktop CLI
linkTitle: Docker Desktop CLI
-weight: 120
+weight: 100
description: How to use the Docker Desktop CLI
keywords: cli, docker desktop, macos, windows, linux
-params:
-  sidebar:
-    badge:
-      color: green
-      text: New
---

{{< summary-bar feature_name="Docker Desktop CLI" >}}

-The Docker Desktop CLI lets you perform key operations such as starting, stopping, restarting, and checking the status of Docker Desktop directly from the command line. It is available with Docker Desktop version 4.37 and later.
+The Docker Desktop CLI lets you perform key operations such as starting, stopping, restarting, and updating Docker Desktop directly from the command line.

The Docker Desktop CLI provides:

-- Enhanced automation and CI/CD integration: Perform Docker Desktop operations directly in CI/CD pipelines for better workflow automation.
+- Simplified automation for local development: Execute Docker Desktop operations more efficiently in scripts and tests.
- An improved developer experience: Restart, quit, or reset Docker Desktop from the command line, reducing dependency on the Docker Desktop Dashboard and improving flexibility and efficiency.

## Usage

@@ -35,6 +30,12 @@ docker desktop COMMAND [OPTIONS]
| `restart` | Restarts Docker Desktop |
| `status` | Displays whether Docker Desktop is running or stopped. |
| `engine ls` | Lists available engines (Windows only) |
-| `engine use` | Switch between Linux and Windows containers (Windows only) |
+| `engine use` | Switches between Linux and Windows containers (Windows only) |
+| `update` | Manages Docker Desktop updates. Available for Mac only with Docker Desktop version 4.38, or all OSs with Docker Desktop version 4.39 and later. |
+| `logs` | Prints log entries |
+| `disable` | Disables a feature |
+| `enable` | Enables a feature |
+| `version` | Shows the Docker Desktop CLI plugin version information |
+| `module` | Manages Docker Desktop modules |

For more details on each command, see the [Docker Desktop CLI reference](/reference/cli/docker/desktop/_index.md).
diff --git a/content/manuals/desktop/features/dev-environments/_index.md b/content/manuals/desktop/features/dev-environments/_index.md
deleted file mode 100644
index a4df3143c6fc..000000000000
--- a/content/manuals/desktop/features/dev-environments/_index.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-description: Dev Environments
-keywords: Dev Environments, share, local, Compose
-title: Overview of Dev Environments
-linkTitle: Dev Environments (Beta)
-weight: 40
-aliases:
-- /desktop/dev-environments/
----
-
-{{< include "dev-envs-changing.md" >}}
-
-{{< summary-bar feature_name="Dev Environments" >}}
-
-Dev Environments let you create a configurable developer environment with all the code and tools you need to quickly get up and running.
-
-It uses tools built into code editors that allows Docker to access code mounted into a container rather than on your local host. This isolates the tools, files and running services on your machine allowing multiple versions of them to exist side by side.
- -You can use Dev Environments through the intuitive GUI in Docker Desktop Dashboard or straight from your terminal with the new [`docker dev` CLI plugin](dev-cli.md). - -## Use Dev Environments - -To use Dev Environments: -1. Navigate to the **Features in Development** tab in **Settings**. -2. On the **Beta** tab, select **Turn on Dev Environments**. -3. Select **Apply & restart**. - -The Dev Environments tab is now visible in Docker Desktop Dashboard. - -## How does it work? - ->**Changes to Dev Environments with Docker Desktop 4.13** -> ->Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder this is automatically migrated the next time you launch. - -Dev Environments is powered by [Docker Compose](/compose/). This allows Dev Environments to take advantage of all the benefits and features of Compose whilst adding an intuitive GUI where you can launch environments with the click of a button. - -Every dev environment you want to run needs a `compose-dev.yaml` file which configures your application's services and lives in your project directory. You don't need to be an expert in Docker Compose or write a `compose-dev.yaml` file from scratch as Dev Environments creates a starter `compose-dev.yaml` files based on the main language in your project. - -You can also use the many [sample dev environments](https://github.com/docker/awesome-compose) as a starting point for how to integrate different services. Alternatively, see [Set up a dev environment](set-up.md) for more information. - -## What's next? - -Learn how to: -- [Launch a dev environment](create-dev-env.md) -- [Set up a dev environment](set-up.md) -- [Distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/create-dev-env.md b/content/manuals/desktop/features/dev-environments/create-dev-env.md deleted file mode 100644 index c0a2bc324465..000000000000 --- a/content/manuals/desktop/features/dev-environments/create-dev-env.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -description: Dev Environments -keywords: Dev Environments, share, Docker Desktop, Compose, launch -title: Launch a dev environment -aliases: -- /desktop/dev-environments/create-compose-dev-env/ -- /desktop/dev-environments/create-dev-env/ -weight: 10 ---- - -{{< include "dev-envs-changing.md" >}} - -You can launch a dev environment from a: -- Git repository -- Branch or tag of a Git repository -- Sub-folder of a Git repository -- Local folder - -This does not conflict with any of the local files or local tooling set up on your host. - ->Tip -> ->Install the [Dev Environments browser extension](https://github.com/docker/dev-envs-extension) for [Chrome](https://chrome.google.com/webstore/detail/docker-dev-environments/gnagpachnalcofcblcgdbofnfakdbeka) or [Firefox](https://addons.mozilla.org/en-US/firefox/addon/docker-dev-environments/), to launch a dev environment faster. - -## Prerequisites - -To get started with Dev Environments, you must also install the following tools and extension on your machine: - -- [Git](https://git-scm.com). Make sure add Git to your PATH if you're a Windows user. -- [Visual Studio Code](https://code.visualstudio.com/) -- [Visual Studio Code Remote Containers Extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) - - After Git is installed, restart Docker Desktop. Select **Quit Docker Desktop**, and then start it again. 
- -## Launch a dev environment from a Git repository - -> [!NOTE] -> -> When cloning a Git repository using SSH, ensure you've added your SSH key to the ssh-agent. To do this, open a terminal and run `ssh-add `. - -> [!IMPORTANT] -> -> If you have enabled the WSL 2 integration in Docker Desktop for Windows, make sure you have an SSH agent running in your WSL 2 distribution. - -{{< accordion title="How to start an SSH agent in WSL 2" >}} - -If your WSL 2 distribution doesn't have an `ssh-agent` running, you can append this script at the end of your profile file (that is: ~/.profile, ~/.zshrc, ...). - -```bash -SSH_ENV="$HOME/.ssh/agent-environment" -function start_agent { - echo "Initializing new SSH agent..." - /usr/bin/ssh-agent | sed 's/^echo/#echo/' > "${SSH_ENV}" - echo succeeded - chmod 600 "${SSH_ENV}" - . "${SSH_ENV}" > /dev/null -} -# Source SSH settings, if applicable -if [ -f "${SSH_ENV}" ]; then - . "${SSH_ENV}" > /dev/null - ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || { - start_agent; - } -else - start_agent; -fi -``` - -{{< /accordion >}} - -To launch a dev environment: - -1. From the **Dev Environments** tab in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for you dev environment. -4. Select **Existing Git repo** as the source and then paste your Git repository link into the field provided. -5. Choose your IDE. You can choose either: - - **Visual Studio Code**. The Git repository is cloned into a Volume and attaches to your containers. This allows you to develop directly inside of them using Visual Studio Code. - - **Other**. The Git repository is cloned into your chosen local directory and attaches to your containers as a bind mount. This shares the directory from your computer to the container, and allows you to develop using any local editor or IDE. -6. Select **Continue**. - -To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application. - - -## Launch from a specific branch or tag - -You can launch a dev environment from a specific branch, for example a branch corresponding to a Pull Request, or a tag by adding `@mybranch` or `@tag` as a suffix to your Git URL: - - `https://github.com/dockersamples/single-dev-env@mybranch` - - or - - `git@github.com:dockersamples/single-dev-env.git@mybranch` - -Docker then clones the repository with your specified branch or tag. - -## Launch from a subfolder of a Git repository - ->Note -> ->Currently, Dev Environments is not able to detect the main language of the subdirectory. You need to define your own base image or services in a `compose-dev.yaml`file located in your subdirectory. For more information on how to configure, see the [React application with a Spring backend and a MySQL database sample](https://github.com/docker/awesome-compose/tree/master/react-java-mysql) or the [Go server with an Nginx proxy and a Postgres database sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-postgres). - -1. From **Dev Environments** in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for you dev environment. -4. Select **Existing Git repo** as the source and then paste the link of your Git repo subfolder into the field provided. -5. Choose your IDE. 
You can choose either: - - **Visual Studio Code**. The Git repository is cloned into a Volume and attaches to your containers. This allows you to develop directly inside of them using Visual Studio Code. - - **Other**. The Git repository is cloned into your chosen local directory and attaches to your containers as a bind mount. This shares the directory from your computer to the container, and allows you to develop using any local editor or IDE. -6. Select **Continue**. - -To launch the application, run the command `make run` in your terminal. This opens an http server on port 8080. Open [http://localhost:8080](http://localhost:8080) in your browser to see the running application. - -## Launch from a local folder - -1. From **Dev Environments** in Docker Dashboard, select **Create**. The **Create a Dev Environment** dialog displays. -2. Select **Get Started**. -3. Optional: Provide a name for your dev environment. -4. Choose **Local directory** as the source. -5. Select **Select** to open the root directory of the code that you would like to work on. - - A directory from your computer is bind mounted to the container, so any changes you make locally is reflected in the dev environment. You can use an editor or IDE of your choice. - -> [!NOTE] -> -> When using a local folder for a dev environment, file changes are synchronized between your environment container and your local files. This can affect the performance inside the container, depending on the number of files in your local folder and the operations performed in the container. - -## What's next? - -Learn how to: -- [Set up a dev environment](set-up.md) -- [Distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/dev-cli.md b/content/manuals/desktop/features/dev-environments/dev-cli.md deleted file mode 100644 index c7d51bb0bf9f..000000000000 --- a/content/manuals/desktop/features/dev-environments/dev-cli.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Set up a dev Environments -keywords: Dev Environments, share, docker dev, Docker Desktop -title: Use the docker dev CLI plugin -aliases: -- /desktop/dev-environments/dev-cli/ ---- - -{{< include "dev-envs-changing.md" >}} - -Use the new `docker dev` CLI plugin to get the full Dev Environments experience from the terminal in addition to the Dashboard. - -It is available with [Docker Desktop 4.13.0 and later](/manuals/desktop/release-notes.md). - -### Usage - -```bash -docker dev [OPTIONS] COMMAND -``` - -### Commands - -| Command | Description | -|:---------------------|:-----------------------------------------| -| `check` | Check Dev Environments | -| `create` | Create a new dev environment | -| `list` | Lists all dev environments | -| `logs` | Traces logs from a dev environment | -| `open` | Open Dev Environment with the IDE | -| `rm` | Removes a dev environment | -| `start` | Starts a dev environment | -| `stop` | Stops a dev environment | -| `version` | Shows the Docker Dev version information | - -### `docker dev check` - -#### Usage - -`docker dev check [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:------------------------------------| -| `--format`,`-f` | Format the output. | - -### `docker dev create` - -#### Usage - -`docker dev create [OPTIONS] REPOSITORY_URL` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------------------------------------------| -| `--detach`,`-d` | Detach creates a Dev Env without attaching to it's logs. 
| -| `--open`,`-o` | Open IDE after a successful creation | - -### `docker dev list` - -#### Usage - -`docker dev list [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:------------------------------| -| `--format`,`-f` | Format the output | -| `--quiet`,`-q` | Only show dev environments names | - -### `docker dev logs` - -#### Usage - -`docker dev logs [OPTIONS] DEV_ENV_NAME` - -### `docker dev open` - -#### Usage - -`docker dev open DEV_ENV_NAME CONTAINER_REF [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------| -| `--editor`,`-e` | Editor. | - -### `docker dev rm` - -#### Usage - -`docker dev rm DEV_ENV_NAME` - -### `docker dev start` - -#### Usage - -`docker dev start DEV_ENV_NAME` - -### `docker dev stop` - -#### Usage - -`docker dev stop DEV_ENV_NAME` - -### `docker dev version` - -#### Usage - -`docker dev version [OPTIONS]` - -#### Options - -| Name, shorthand | Description | -|:---------------------|:----------------------------------------------| -| `--format`,`-f` | Format the output. | -| `--short`,`-s` | Shows only Docker Dev's version number. | diff --git a/content/manuals/desktop/features/dev-environments/set-up.md b/content/manuals/desktop/features/dev-environments/set-up.md deleted file mode 100644 index 7c1fd89982d6..000000000000 --- a/content/manuals/desktop/features/dev-environments/set-up.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -description: Set up a dev Environments -keywords: Dev Environments, share, set up, Compose, Docker Desktop -title: Set up a dev environment -weight: 20 -aliases: -- /desktop/dev-environments/set-up/ ---- - -{{< include "dev-envs-changing.md" >}} - ->**Changes to Dev Environments with Docker Desktop 4.13** -> ->Docker has simplified how you configure your dev environment project. All you need to get started is a `compose-dev.yaml` file. If you have an existing project with a `.docker/` folder this is automatically migrated the next time you launch. -> -> If you are using `.docker/docker-compose.yaml`, we move it to `../compose-dev.yaml`. ->If you are using `.docker/config.json`, we create a `../compose-dev.yaml` file with a single service named "app”. It is configured to use the image or Dockerfile referenced in the JSON as a starting point. - -To set up a dev environment, there are additional configuration steps to tell Docker Desktop how to build, start, and use the right image for your services. - -Dev Environments use an `compose-dev.yaml` file located at the root of your project. This file allows you to define the image required for a dedicated service, the ports you'd like to expose, along with additional configuration options. - -The following is an example `compose-dev.yaml` file. 
- -```yaml -version: "3.7" -services: - backend: - build: - context: backend - target: development - secrets: - - db-password - depends_on: - - db - db: - image: mariadb - restart: always - healthcheck: - test: [ "CMD", "mysqladmin", "ping", "-h", "127.0.0.1", "--silent" ] - interval: 3s - retries: 5 - start_period: 30s - secrets: - - db-password - volumes: - - db-data:/var/lib/mysql - environment: - - MYSQL_DATABASE=example - - MYSQL_ROOT_PASSWORD_FILE=/run/secrets/db-password - expose: - - 3306 - proxy: - build: proxy - ports: - - 8080:80 - depends_on: - - backend -volumes: - db-data: -secrets: - db-password: - file: db/password.txt -``` - -In the yaml file, the build context `backend` specifies that that the container should be built using the `development` stage (`target` attribute) of the Dockerfile located in the `backend` directory (`context` attribute) - -The `development` stage of the Dockerfile is defined as follows: - -```dockerfile -# syntax=docker/dockerfile:1 -FROM golang:1.16-alpine AS build -WORKDIR /go/src/github.com/org/repo -COPY . . -RUN go build -o server . -FROM build AS development -RUN apk update \ - && apk add git -CMD ["go", "run", "main.go"] -FROM alpine:3.12 -EXPOSE 8000 -COPY --from=build /go/src/github.com/org/repo/server /server -CMD ["/server"] -``` - -The `development` target uses a `golang:1.16-alpine` image with all dependencies you need for development. You can start your project directly from VS Code and interact with the others applications or services such as the database or the frontend. - -In the example, the Docker Compose files are the same. However, they could be different and the services defined in the main Compose file may use other targets to build or directly reference other images. - -## What's next? - -Learn how to [distribute your dev environment](share.md) diff --git a/content/manuals/desktop/features/dev-environments/share.md b/content/manuals/desktop/features/dev-environments/share.md deleted file mode 100644 index 79f8703def30..000000000000 --- a/content/manuals/desktop/features/dev-environments/share.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -description: Dev Environments -keywords: Dev Environments, share, Docker Desktop -title: Distribute your dev environment -weight: 30 -aliases: -- /desktop/dev-environments/share/ ---- - -{{< include "dev-envs-changing.md" >}} - -The `compose-dev.yaml` config file makes distributing your dev environment easy so everyone can access the same code and any dependencies. - -### Distribute your dev environment - -When you are ready to share your environment, simply copy the link to the Github repo where your project is stored, and share the link with your team members. - -You can also create a link that automatically starts your dev environment when opened. This can then be placed on a GitHub README or pasted into a Slack channel, for example. - -To create the link simply join the following link with the link to your dev environment's GitHub repository: - -`https://open.docker.com/dashboard/dev-envs?url=` - -The following example opens a [Compose sample](https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql), a Go server with an Nginx proxy and a MariaDB/MySQL database, in Docker Desktop. 
- -[https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql](https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/nginx-golang-mysql) - -### Open a dev environment that has been distributed to you - -To open a dev environment that has been shared with you, select the **Create** button in the top right-hand corner, select source **Existing Git repo**, and then paste the URL. diff --git a/content/manuals/desktop/features/gpu.md b/content/manuals/desktop/features/gpu.md index 494d178291dd..6e69184205fe 100644 --- a/content/manuals/desktop/features/gpu.md +++ b/content/manuals/desktop/features/gpu.md @@ -1,7 +1,7 @@ --- -title: GPU support in Docker Desktop +title: GPU support in Docker Desktop for Windows linkTitle: GPU support -weight: 80 +weight: 40 description: How to use GPU in Docker Desktop keywords: gpu, gpu support, nvidia, wsl2, docker desktop, windows toc_max: 3 @@ -13,22 +13,27 @@ aliases: > > Currently GPU support in Docker Desktop is only available on Windows with the WSL2 backend. -## Using NVIDIA GPUs with WSL2 +Docker Desktop for Windows supports NVIDIA GPU Paravirtualization (GPU-PV) on NVIDIA GPUs, allowing containers to access GPU resources for compute-intensive workloads like AI, machine learning, or video processing. -Docker Desktop for Windows supports WSL 2 GPU Paravirtualization (GPU-PV) on NVIDIA GPUs. To enable WSL 2 GPU Paravirtualization, you need: +## Prerequisites -- A machine with an NVIDIA GPU +To enable WSL 2 GPU Paravirtualization, you need: + +- A Windows machine with an NVIDIA GPU - Up to date Windows 10 or Windows 11 installation - [Up to date drivers](https://developer.nvidia.com/cuda/wsl) from NVIDIA supporting WSL 2 GPU Paravirtualization - The latest version of the WSL 2 Linux kernel. Use `wsl --update` on the command line - To make sure the [WSL 2 backend is turned on](wsl/_index.md#turn-on-docker-desktop-wsl-2) in Docker Desktop -To validate that everything works as expected, execute a `docker run` command with the `--gpus=all` flag. For example, the following will run a short benchmark on your GPU: +## Validate GPU support + +To confirm GPU access is working inside Docker, run the following: ```console $ docker run --rm -it --gpus=all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark ``` -The output will be similar to: + +This runs an n-body simulation benchmark on the GPU. The output will be similar to: ```console Run "nbody -benchmark [-numbodies=]" to measure performance. @@ -58,9 +63,16 @@ GPU Device 0: "GeForce RTX 2060 with Max-Q Design" with compute capability 7.5 = 2724.379 single-precision GFLOP/s at 20 flops per interaction ``` -Or if you wanted to try something more useful you could use the official [Ollama image](https://hub.docker.com/r/ollama/ollama) to run the Llama2 large language model. 
+## Run a real-world model: Llama2 with Ollama
+
+Use the [official Ollama image](https://hub.docker.com/r/ollama/ollama) to run the Llama2 LLM with GPU acceleration:

```console
$ docker run --gpus=all -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
+```
+
+Then start the model:
+
+```console
$ docker exec -it ollama ollama run llama2
```
diff --git a/content/manuals/desktop/features/kubernetes.md b/content/manuals/desktop/features/kubernetes.md
index 7ad1a0415719..0be5c1d0ccf5 100644
--- a/content/manuals/desktop/features/kubernetes.md
+++ b/content/manuals/desktop/features/kubernetes.md
@@ -2,6 +2,7 @@
description: See how you can deploy to Kubernetes on Docker Desktop
keywords: deploy, kubernetes, kubectl, orchestration, Docker Desktop
title: Deploy on Kubernetes with Docker Desktop
+linkTitle: Deploy on Kubernetes
aliases:
- /docker-for-windows/kubernetes/
- /docker-for-mac/kubernetes/
@@ -9,15 +10,17 @@ aliases:
weight: 60
---

-Docker Desktop includes a standalone Kubernetes server and client, as well as Docker CLI integration, enabling local Kubernetes development and testing directly on your machine.
+Docker Desktop includes a standalone Kubernetes server and client, as well as Docker CLI integration, enabling local Kubernetes development and testing directly on your machine.

-The Kubernetes server runs as a single-node cluster within a Docker container. This lightweight setup helps you explore Kubernetes features, test workloads, and work with container orchestration in parallel with other Docker functionalities.
+The Kubernetes server runs as a single-node or multi-node cluster within one or more Docker containers. This lightweight setup helps you explore Kubernetes features, test workloads, and work with container orchestration in parallel with other Docker functionalities.

Kubernetes on Docker Desktop runs alongside other workloads, including Swarm services and standalone containers.

+![Screenshot showing the Kubernetes settings tab in Docker Desktop.](../images/k8s-settings.png)
+
## What happens when I enable Kubernetes in Docker Desktop?

-When you enable Kubernetes in Docker Desktop, the following actions are triggered in the Docker Desktop backend and VM:
+The following actions are triggered in the Docker Desktop backend and VM:

- Generation of certificates and cluster configuration
- Download and installation of Kubernetes internal components

Turning the Kubernetes server on or off in Docker Desktop does not affect your o

## Install and turn on Kubernetes

1. Open the Docker Desktop Dashboard and navigate to **Settings**.
-2. Select the **Kubernetes** tab.
-3. Select the **Enable Kubernetes** checkbox.
-4. Select **Apply & Restart** to save the settings and then select **Install** to confirm. This sets up the images required to run the Kubernetes server as containers, and installs the `kubectl` command-line tool on your system at `/usr/local/bin/kubectl` (Mac) or `C:\Program Files\Docker\Docker\Resources\bin\kubectl.exe` (Windows).
+2. Select the **Kubernetes** tab.
+3. Toggle on **Enable Kubernetes**.
+4. Choose your [cluster provisioning method](#cluster-provisioning-method).
+5. Select **Apply** to save the settings.
+
+This sets up the images required to run the Kubernetes server as containers, and installs the `kubectl` command-line tool on your system at `/usr/local/bin/kubectl` (Mac) or `C:\Program Files\Docker\Docker\resources\bin\kubectl.exe` (Windows).

> [!NOTE]
>
> Docker Desktop for Linux does not include `kubectl` by default.
You can install it separately by following the [Kubernetes installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/). Ensure the `kubectl` binary is installed at `/usr/local/bin/kubectl`.

-When Kubernetes is enabled, its status is displayed in the Docker Desktop Dashboard footer and the Docker menu.
+When Kubernetes is enabled, its status is displayed in the Docker Desktop Dashboard footer and the Docker menu.

You can check which version of Kubernetes you're on with:

@@ -45,11 +51,35 @@
$ kubectl version
```

-### Viewing system containers
+### Cluster provisioning method

-By default, Kubernetes system containers are hidden. To inspect these containers, navigate to **Settings** > **Kubernetes** and then enable **Show system containers (advanced)**.
+Docker Desktop Kubernetes can be provisioned with either the `kubeadm` or `kind`
+provisioner.

-You can now view the running Kubernetes containers with `docker ps` or in the Docker Desktop Dashboard.
+`kubeadm` is the older provisioner. It supports only a single-node cluster, doesn't let you select the Kubernetes
+version, is slower to provision than `kind`, and isn't supported by [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/index.md) (ECI),
+meaning that if ECI is enabled, the cluster works but isn't protected by ECI.
+
+`kind` is the newer provisioner, and it's available if you are signed in and are
+using Docker Desktop version 4.38 or later. It supports multi-node clusters for
+a more realistic Kubernetes setup, lets you choose the Kubernetes version, is
+faster to provision than `kubeadm`, and is supported by ECI (that is, when ECI is
+enabled, the Kubernetes cluster runs in unprivileged Docker containers, making
+it more secure). Note, however, that `kind` requires that Docker Desktop be
+configured to use the [containerd image store](containerd.md) (the default image
+store in Docker Desktop 4.34 and later).
+
+The following table summarizes this comparison.
+
+| Feature | `kubeadm` | `kind` |
+| :------ | :-----: | :--: |
+| Availability | Docker Desktop 4.0+ | Docker Desktop 4.38+ (requires sign in) |
+| Multi-node cluster support | No | Yes |
+| Kubernetes version selector | No | Yes |
+| Speed to provision | ~1 min | ~30 seconds |
+| Supported by ECI | No | Yes |
+| Works with containerd image store | Yes | Yes |
+| Works with Docker image store | Yes | No |

## Using the kubectl command

$ kubectl config use-context docker-desktop

> [!TIP]
>
> If the `kubectl config get-contexts` command returns an empty result, try:
->
+>
> - Running the command in the Command Prompt or PowerShell.
> - Setting the `KUBECONFIG` environment variable to point to your `.kube/config` file.

@@ -93,6 +123,106 @@ For more information about `kubectl`, see the

Kubernetes clusters are not automatically upgraded with Docker Desktop updates. To upgrade the cluster, you must manually select **Reset Kubernetes Cluster** in settings.

+## Additional settings
+
+### Viewing system containers
+
+By default, Kubernetes system containers are hidden. To inspect these containers, enable **Show system containers (advanced)**.
+
+You can now view the running Kubernetes containers with `docker ps` or in the Docker Desktop Dashboard.
+
+### Configuring a custom image registry for Kubernetes control plane images
+
+Docker Desktop uses containers to run the Kubernetes control plane.
By default, Docker Desktop pulls
+the associated container images from Docker Hub. The images pulled depend on the [cluster provisioning mode](#cluster-provisioning-method).
+
+For example, in `kind` mode it requires the following images:
+
+```console
docker.io/kindest/node:
docker.io/envoyproxy/envoy:
docker.io/docker/desktop-cloud-provider-kind:
docker.io/docker/desktop-containerd-registry-mirror:
```
+
+In `kubeadm` mode it requires the following images:
+
+```console
docker.io/registry.k8s.io/kube-controller-manager:
docker.io/registry.k8s.io/kube-apiserver:
docker.io/registry.k8s.io/kube-scheduler:
docker.io/registry.k8s.io/kube-proxy:
docker.io/registry.k8s.io/etcd:
docker.io/registry.k8s.io/pause:
docker.io/registry.k8s.io/coredns/coredns:
docker.io/docker/desktop-storage-provisioner:
docker.io/docker/desktop-vpnkit-controller:
docker.io/docker/desktop-kubernetes:
```
+
+The image tags are automatically selected by Docker Desktop based on several
+factors, including the version of Kubernetes being used. The tags vary for each image.
+
+To accommodate scenarios where access to Docker Hub is not allowed, admins can
+configure Docker Desktop to pull the images listed above from a different registry (for example, a mirror)
+using the [KubernetesImagesRepository](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#kubernetes) setting as follows.
+
+An image name can be broken into `[registry[:port]/][namespace/]repository[:tag]` components.
+The `KubernetesImagesRepository` setting lets users override the `[registry[:port]/][namespace]`
+portion of the image's name.
+
+For example, if Docker Desktop Kubernetes is configured in `kind` mode and
+`KubernetesImagesRepository` is set to `my-registry:5000/kind-images`, then
+Docker Desktop will pull the images from:
+
+```console
my-registry:5000/kind-images/node:
my-registry:5000/kind-images/envoy:
my-registry:5000/kind-images/desktop-cloud-provider-kind:
my-registry:5000/kind-images/desktop-containerd-registry-mirror:
```
+
+These images should be cloned or mirrored from their respective images in Docker Hub. The tags must
+also match what Docker Desktop expects.
+
+The recommended approach to set this up is the following:
+
+1. Start Docker Desktop.
+
+2. In **Settings** > **Kubernetes**, enable the **Show system containers (advanced)** setting.
+
+3. In **Settings** > **Kubernetes**, start Kubernetes using the desired cluster provisioning method: `kubeadm` or `kind`.
+
+4. Wait for Kubernetes to start.
+
+5. Use `docker ps` to view the container images used by Docker Desktop for the Kubernetes control plane.
+
+6. Clone or mirror those images (with matching tags) to your custom registry, as shown in the sketch after the following note.
+
+7. Stop the Kubernetes cluster.
+
+8. Configure the `KubernetesImagesRepository` setting to point to your custom registry.
+
+9. Restart Docker Desktop.
+
+10. Verify that the Kubernetes cluster is using the custom registry images using the `docker ps` command.
+
+> [!NOTE]
+>
+> The `KubernetesImagesRepository` setting only applies to control plane images used by Docker Desktop
+> to set up the Kubernetes cluster. It has no effect on other Kubernetes pods.
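As a sketch of step 6 above (reusing the hypothetical `my-registry:5000/kind-images` repository from the earlier example, with `<tag>` standing in for the tag reported by `docker ps`), each image can be cloned with standard pull, tag, and push commands:

```console
$ docker pull docker.io/kindest/node:<tag>
$ docker tag docker.io/kindest/node:<tag> my-registry:5000/kind-images/node:<tag>
$ docker push my-registry:5000/kind-images/node:<tag>
```

Repeat the same three commands for each control plane image listed by `docker ps`.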
+
+> [!NOTE]
+>
+> When `KubernetesImagesRepository` is used and [Enhanced Container Isolation (ECI)](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md)
+> is enabled, add the following images to the [ECI Docker socket mount image list](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#enhanced-container-isolation):
+>
+> * [imagesRepository]/desktop-cloud-provider-kind:*
+> * [imagesRepository]/desktop-containerd-registry-mirror:*
+>
+> These containers mount the Docker socket, so you must add the images to the ECI images list. If not,
+> ECI will block the mount and Kubernetes won't start.

## Troubleshooting

- If Kubernetes fails to start, make sure Docker Desktop is running with enough allocated resources. Check **Settings** > **Resources**.
@@ -101,13 +231,13 @@
$ kubectl config use-context docker-desktop
```
You can then try checking the logs of the [Kubernetes system containers](#viewing-system-containers) if you have enabled that setting.
-- If you're experiencing cluster issues after updating, reset your Kubernetes cluster. Resetting a Kubernetes cluster can help resolve issues by essentially reverting the cluster to a clean state, and clearing out misconfigurations, corrupted data, or stuck resources that may be causing problems. If the issue still persists, you may need to clean and purge data, and then restart Docker Desktop.
+- If you're experiencing cluster issues after updating, reset your Kubernetes cluster. Resetting a Kubernetes cluster can help resolve issues by reverting the cluster to a clean state, clearing out misconfigurations, corrupted data, or stuck resources that might be causing problems. If the issue persists, you might need to clean and purge data, and then restart Docker Desktop.

## Turn off and uninstall Kubernetes

To turn off Kubernetes in Docker Desktop:

1. From the Docker Desktop Dashboard, select the **Settings** icon.
-2. Select the **Kubernetes** tab.
+2. Select the **Kubernetes** tab.
3. Deselect the **Enable Kubernetes** checkbox.
-4. Select **Apply & Restart** to save the settings. This stops and removes Kubernetes containers, and also removes the `/usr/local/bin/kubectl` command.
+4. Select **Apply** to save the settings. This stops and removes Kubernetes containers, and also removes the `/usr/local/bin/kubectl` command.
diff --git a/content/manuals/desktop/features/networking.md b/content/manuals/desktop/features/networking.md
index 71156eebdf04..f2adbfe15787 100644
--- a/content/manuals/desktop/features/networking.md
+++ b/content/manuals/desktop/features/networking.md
@@ -2,6 +2,7 @@
description: Understand how networking works on Docker Desktop and see the known limitations
keywords: networking, docker desktop, proxy, vpn, Linux, Mac, Windows
title: Explore networking features on Docker Desktop
+linkTitle: Networking
aliases:
- /desktop/linux/networking/
- /docker-for-mac/networking/
- /docker-for-windows/networking/
- /desktop/windows/networking/
- /desktop/networking/
-weight: 50
+weight: 30
---

-Docker Desktop provides several networking features to make it easier to
-use.
+Docker Desktop includes built-in networking capabilities to help you connect containers with services on your host, across containers, or through proxies and VPNs.
## Networking features for all platforms

@@ -33,29 +33,29 @@ When you run a container with the `-p` argument, for example:

$ docker run -p 80:80 -d nginx
```

-Docker Desktop makes whatever is running on port 80 in the container, in
-this case, `nginx`, available on port 80 of `localhost`. In this example, the
-host and container ports are the same. If, for example, you already have something running on port 80 of
-your host machine, you can connect the container to a different port:
+Docker Desktop makes whatever is running on port `80` in the container, in
+this case, `nginx`, available on port `80` of `localhost`. In this example, the
+host and container ports are the same.
+
+To avoid conflicts with services already using port `80` on the host:

```console
$ docker run -p 8000:80 -d nginx
```

-Now, connections to `localhost:8000` are sent to port 80 in the container. The
-syntax for `-p` is `HOST_PORT:CLIENT_PORT`.
+Now connections to `localhost:8000` are sent to port `80` in the container.
+
+> [!TIP]
+>
+> The syntax for `-p` is `HOST_PORT:CONTAINER_PORT`.

### HTTP/HTTPS Proxy support

See [Proxies](/manuals/desktop/settings-and-maintenance/settings.md#proxies)

-### SOCKS5 proxy support
+### SOCKS5 proxy support

-{{< introduced desktop 4.28.0 "../release-notes.md#4280" >}}
-
-> [!NOTE]
->
-> Requires a Business subscription.
+{{< summary-bar feature_name="SOCKS5 proxy support" >}}

SOCKS (Socket Secure) is a protocol that facilitates the routing of network packets between a client and a server through a proxy server. It provides a way to enhance privacy, security, and network performance for users and applications.

To enable and set up SOCKS proxy support:

3. Switch on the **Manual proxy configuration** toggle.
4. In the **Secure Web Server HTTPS** box, paste your `socks5://host:port` URL.

+## Networking mode and DNS behavior for Mac and Windows
+
+With Docker Desktop version 4.42 and later, you can customize how Docker handles container networking and DNS resolution to better support a range of environments, from IPv4-only to dual-stack and IPv6-only systems. These settings help prevent timeouts and connectivity issues caused by incompatible or misconfigured host networks.
+
+> [!NOTE]
+>
+> These settings can be overridden on a per-network basis using CLI flags or Compose file options.
+
+### Default networking mode
+
+Choose the default IP protocol used when Docker creates new networks. This lets you align Docker with your host's network capabilities or organizational requirements, such as enforcing IPv6-only access.
+
+The options available are:
+
+- **Dual IPv4/IPv6** (default): Supports both IPv4 and IPv6. Most flexible and ideal for environments with dual-stack networking.
+- **IPv4 only**: Only IPv4 addresses are used. Use this if your host or network does not support IPv6.
+- **IPv6 only**: Only IPv6 addresses are used. Best for environments transitioning to or enforcing IPv6-only connectivity.
+
+> [!NOTE]
+>
+> This setting can be overridden on a per-network basis using CLI flags or Compose file options.
+
+### DNS resolution behavior
+
+Control how Docker filters DNS records returned to containers, improving reliability in environments where only IPv4 or IPv6 is supported. This setting is especially useful for preventing apps from trying to connect using IP families that aren't actually available, which can cause avoidable delays or failures.
+
+Depending on your selected network mode, the options available are:
+
+- **Auto (recommended)**: Docker detects your host's network stack and automatically filters out unsupported DNS record types (A for IPv4, AAAA for IPv6).
+- **Filter IPv4 (A records)**: Prevents containers from resolving IPv4 addresses. Only available in dual-stack mode.
+- **Filter IPv6 (AAAA records)**: Prevents containers from resolving IPv6 addresses. Only available in dual-stack mode.
+- **No filtering**: Docker returns all DNS records (A and AAAA), regardless of host support.
+
+> [!IMPORTANT]
+>
+> Switching the default networking mode resets the DNS filter to Auto.
+
+### Using Settings Management
+
+If you're an administrator, you can use [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md#networking) to enforce this Docker Desktop setting across your developers' machines. Choose from the following code snippets and add it to your `admin-settings.json` file,
+or configure this setting using the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md).
+
+{{< tabs >}}
+{{< tab name="Networking mode" >}}
+
+Dual IPv4/IPv6:
+
+```json
{
  "defaultNetworkingMode": {
    "locked": true,
    "value": "dual-stack"
  }
}
```
+
+IPv4 only:
+
+```json
{
  "defaultNetworkingMode": {
    "locked": true,
    "value": "ipv4only"
  }
}
```
+
+IPv6 only:
+
+```json
{
  "defaultNetworkingMode": {
    "locked": true,
    "value": "ipv6only"
  }
}
```
+
+{{< /tab >}}
+{{< tab name="DNS resolution" >}}
+
+Auto filter:
+
+```json
{
  "dnsInhibition": {
    "locked": true,
    "value": "auto"
  }
}
```
+
+Filter IPv4:
+
+```json
{
  "dnsInhibition": {
    "locked": true,
    "value": "ipv4"
  }
}
```
+
+Filter IPv6:
+
+```json
{
  "dnsInhibition": {
    "locked": true,
    "value": "ipv6"
  }
}
```
+
+No filter:
+
+```json
{
  "dnsInhibition": {
    "locked": true,
    "value": "none"
  }
}
```
+
+{{< /tab >}}
+{{< /tabs >}}
+
## Networking features for Mac and Linux

### SSH agent forwarding

-Docker Desktop on Mac and Linux allows you to use the host’s SSH agent inside a container. To do this:
+Docker Desktop for Mac and Linux lets you use the host’s SSH agent inside a container. To do this:

1. Bind mount the SSH agent socket by adding the following parameter to your `docker run` command:

@@ -104,9 +232,9 @@ services:

### Changing internal IP addresses

-The internal IP addresses used by Docker can be changed from **Settings**. After changing IPs, it is necessary to reset the Kubernetes cluster and to leave any active Swarm.
+The internal IP addresses used by Docker can be changed from **Settings**. After changing IPs, you need to reset the Kubernetes cluster and leave any active Swarm.

-### There is no docker0 bridge on the host
+### There is no `docker0` bridge on the host

Because of the way networking is implemented in Docker Desktop, you cannot see
a `docker0` interface on the host. This interface is actually within the
@@ -127,7 +255,7 @@ However if you are a Windows user, per-container IP addressing is possible with

### I want to connect from a container to a service on the host

The host has a changing IP address, or none if you have no network access.
-We recommend that you connect to the special DNS name `host.docker.internal`,
+Docker recommends you connect to the special DNS name `host.docker.internal`,
which resolves to the internal IP address used by the host.
You can also reach the gateway using `gateway.docker.internal`.

@@ -154,7 +282,7 @@ If you have installed Python on your machine, use the following instructions as
Port forwarding works for `localhost`. `--publish`, `-p`, or `-P` all work.
Ports exposed from Linux are forwarded to the host.

-We recommend you publish a port, or to connect from another
+Docker recommends you publish a port or connect from another
container. This is what you need to do even on Linux if the
container is on an overlay network, not a bridge network, as these are not
routed.

diff --git a/content/manuals/desktop/features/synchronized-file-sharing.md b/content/manuals/desktop/features/synchronized-file-sharing.md
index f424fe3291d6..d0ca03e66eb0 100644
--- a/content/manuals/desktop/features/synchronized-file-sharing.md
+++ b/content/manuals/desktop/features/synchronized-file-sharing.md
@@ -1,6 +1,6 @@
---
title: Synchronized file shares
-weight: 30
+weight: 70
description: Get started with Synchronized file shares on Docker Desktop.
keyword: mutagen, file sharing, docker desktop, bind mounts
aliases:
@@ -40,7 +40,7 @@ After creating a file share instance, any container using a bind mount that poin
To create a file share instance:
1. Sign in to Docker Desktop.
2. In **Settings**, navigate to the **File sharing** tab within the **Resources** section.
-3. In the **Synchronized File Shares** section, select the **Create share** icon.
+3. In the **Synchronized file shares** section, select **Create share**.
4. Select a host folder to share. The synchronized file share should initialize and be usable.

File shares take a few seconds to initialize as files are copied into the Docker Desktop VM. During this time, the status indicator displays **Preparing**. There is also a status icon in the footer of the Docker Desktop Dashboard that keeps you updated.

@@ -53,7 +53,7 @@ When the status indicator displays **Watching for filesystem changes**, your fil
> [!TIP]
>
-> Compose can now automatically create file shares for bind mounts.
+> Docker Compose can automatically create file shares for bind mounts.
> Ensure you're signed in to Docker with a paid subscription and have enabled both **Access experimental features** and **Manage Synchronized file shares with Compose** in Docker Desktop's settings.

## Explore your file share instance

@@ -93,11 +93,3 @@ In general, use your `.syncignore` file to exclude items that aren't critical to
- POSIX-style Windows paths are not supported. Avoid setting the [`COMPOSE_CONVERT_WINDOWS_PATHS`](/manuals/compose/how-tos/environment-variables/envvars.md#compose_convert_windows_paths) environment variable in Docker Compose.
- If you don't have the correct permissions to create symbolic links and your container attempts to create symbolic links in your file share instance, an **unable to create symbolic link** error message displays. For Windows users, see Microsoft's [Create symbolic links documentation](https://learn.microsoft.com/en-us/previous-versions/windows/it-pro/windows-10/security/threat-protection/security-policy-settings/create-symbolic-links) for best practices and location of the **Create symbolic links** security policy setting. For Mac and Linux users, check that you have write permissions on the folder.
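+
+As a quick way to confirm a share is in use, run a container with a bind mount inside the shared folder. A minimal sketch, assuming a hypothetical share created for `/Users/me/project`:
+
+```console
+# Any bind mount under the shared folder automatically uses the synchronized cache
+$ docker run --rm -v /Users/me/project:/app alpine ls /app
+```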
-
-## Feedback and support
-
-To give feedback or report bugs, visit:
-
-- [Docker Desktop for Mac issues on GitHub](https://github.com/docker/for-mac/issues)
-- [Docker Desktop for Windows issues on GitHub](https://github.com/docker/for-win/issues)
-- [Docker Desktop for Linux issues on GitHub](https://github.com/docker/desktop-linux/issues)
diff --git a/content/manuals/desktop/features/usbip.md b/content/manuals/desktop/features/usbip.md
index 3ed10113c923..4e833dd2e054 100644
--- a/content/manuals/desktop/features/usbip.md
+++ b/content/manuals/desktop/features/usbip.md
@@ -1,30 +1,21 @@
---
title: Using USB/IP with Docker Desktop
linkTitle: USB/IP support
-weight: 100
+weight: 50
description: How to use USB/IP in Docker Desktop
keywords: usb, usbip, docker desktop, macos, windows, linux
toc_max: 3
aliases:
- /desktop/usbip/
-params:
-  sidebar:
-    badge:
-      color: green
-      text: New
---

{{< summary-bar feature_name="USB/IP support" >}}

-> [!NOTE]
->
-> Available on Docker Desktop for Mac, Linux, and Windows with the Hyper-V backend.
-
USB/IP enables you to share USB devices over the network, which can then be accessed from within Docker containers. This page focuses on sharing USB devices connected to the machine you run Docker Desktop on. You can repeat the following process to attach and use additional USB devices as needed.

> [!NOTE]
>
-> The Docker Desktop VM kernel image comes pre-configured with drivers for many common USB devices, but Docker can't guarantee every possible USB device will work with this setup.
+> Docker Desktop includes built-in drivers for many common USB devices, but Docker can't guarantee every possible USB device works with this setup.

## Setup and use

@@ -53,6 +44,8 @@ To attach the USB device, start a privileged Docker container with the PID names
$ docker run --rm -it --privileged --pid=host alpine
```

+`--privileged` gives the container full access to the host, and `--pid=host` lets it share the host’s process namespace.
+
### Step three: Enter the mount namespace of PID 1

Inside the container, enter the mount namespace of the `init` process to gain access to the pre-installed USB/IP tools:

@@ -61,7 +54,7 @@ Inside the container, enter the mount namespace of the `init` process to gain ac
$ nsenter -t 1 -m
```

-### Step four: Use USB/IP tools
+### Step four: Use the USB/IP tools

Now you can use the USB/IP tools as you would on any other system:

@@ -107,7 +100,7 @@ Example output:
event0 mice
```

-### Step five: Use the attached device in another container
+### Step five: Access the device from another container

While the initial container remains running to keep the USB device operational, you can access the attached device from another container.
For example: diff --git a/content/manuals/desktop/features/vmm.md b/content/manuals/desktop/features/vmm.md index 474caa182d97..5e977f7c6aab 100644 --- a/content/manuals/desktop/features/vmm.md +++ b/content/manuals/desktop/features/vmm.md @@ -1,11 +1,6 @@ --- title: Virtual Machine Manager for Docker Desktop on Mac linkTitle: Virtual Machine Manager -params: - sidebar: - badge: - color: green - text: New keywords: virtualization software, resource allocation, mac, docker desktop, vm monitoring, vm performance, apple silicon description: Discover Docker Desktop for Mac's Virtual Machine Manager (VMM) options, including the new Docker VMM for Apple Silicon, offering enhanced performance and efficiency weight: 110 @@ -13,18 +8,18 @@ aliases: - /desktop/vmm/ --- -{{< summary-bar feature_name="VMM" >}} +Docker Desktop supports multiple Virtual Machine Managers (VMMs) to power the Linux VM that runs containers. You can choose the most suitable option based on your system architecture (Intel or Apple Silicon), performance needs, and feature requirements. This page provides an overview of the available options. -The Virtual Machine Manager (VMM) in Docker Desktop for Mac is responsible for creating and managing the virtual machine used to run containers. Depending on your system architecture and performance needs, you can choose from multiple VMM options in Docker Desktop's [settings](/manuals/desktop/settings-and-maintenance/settings.md#general). This page provides an overview of the available options. +To change the VMM, go to **Settings** > **General** > **Virtual Machine Manager**. ## Docker VMM -Docker VMM is a new, container-optimized hypervisor introduced in Docker Desktop 4.35 and available on Apple Silicon Macs only. Its enhanced speed and resource efficiency makes it an ideal choice for optimizing your workflow. +{{< summary-bar feature_name="VMM" >}} -Docker VMM brings exciting advancements specifically tailored for Apple Silicon machines. By optimizing both the Linux kernel and hypervisor layers, Docker VMM delivers significant performance enhancements across common developer tasks. +Docker VMM is a new, container-optimized hypervisor. By optimizing both the Linux kernel and hypervisor layers, Docker VMM delivers significant performance enhancements across common developer tasks. Some key performance enhancements provided by Docker VMM include: - - Faster I/O operations: With a cold cache, iterating over a large shared filesystem with `find` is 2x faster than when the Apple Virtualization Framework is used. + - Faster I/O operations: With a cold cache, iterating over a large shared filesystem with `find` is 2x faster than when the Apple Virtualization framework is used. - Improved caching: With a warm cache, performance can improve by as much as 25x, even surpassing native Mac operations. These improvements directly impact developers who rely on frequent file access and overall system responsiveness during containerized development. Docker VMM marks a significant leap in speed, enabling smoother workflows and faster iteration cycles. @@ -40,19 +35,19 @@ As Docker VMM is still in Beta, there are a few known limitations: - Docker VMM does not currently support Rosetta, so emulation of amd64 architectures is slow. Docker is exploring potential solutions. - Certain databases, like MongoDB and Cassandra, may fail when using virtiofs with Docker VMM. This issue is expected to be resolved in a future release. 
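+
+Until that fix lands, one possible workaround (a sketch, not an official recommendation) is to keep database data in a named volume rather than a bind mount. Named volumes are stored inside the Docker Desktop VM, so they don't go through virtiofs:
+
+```console
+# Create a named volume and use it for the database's data directory
+$ docker volume create mongo-data
+$ docker run -d --name mongo -v mongo-data:/data/db mongo
+```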
-## Apple Virtualization Framework
+## Apple Virtualization framework

-The Apple Virtualization Framework is a stable and well-established option for managing virtual machines on Mac. It has been a reliable choice for many Mac users over the years. This framework is best suited for developers who prefer a proven solution with solid performance and broad compatibility.
+The Apple Virtualization framework is a stable and well-established option for managing virtual machines on Mac. It has been a reliable choice for many Mac users over the years. This framework is best suited for developers who prefer a proven solution with solid performance and broad compatibility.

## QEMU (Legacy) for Apple Silicon

> [!NOTE]
>
-> QEMU will be deprecated in a future release.
+> QEMU will be deprecated on July 14, 2025. For more information, see the [blog announcement](https://www.docker.com/blog/docker-desktop-for-mac-qemu-virtualization-option-to-be-deprecated-in-90-days/).

QEMU is a legacy virtualization option for Apple Silicon Macs, primarily supported for older use cases.

-Docker recommends transitioning to newer alternatives, such as Docker VMM or the Apple Virtualization Framework, as they offer superior performance and ongoing support. Docker VMM, in particular, offers substantial speed improvements and a more efficient development environment, making it a compelling choice for developers working with Apple Silicon.
+Docker recommends transitioning to newer alternatives, such as Docker VMM or the Apple Virtualization framework, as they offer superior performance and ongoing support. Docker VMM, in particular, offers substantial speed improvements and a more efficient development environment, making it a compelling choice for developers working with Apple Silicon.

Note that this is not related to using QEMU to emulate non-native architectures in [multi-platform builds](/manuals/build/building/multi-platform.md#qemu).

diff --git a/content/manuals/desktop/features/wasm.md b/content/manuals/desktop/features/wasm.md
index 07103eeb8c5a..b14de66ad6ae 100644
--- a/content/manuals/desktop/features/wasm.md
+++ b/content/manuals/desktop/features/wasm.md
@@ -1,22 +1,30 @@
---
-title: Wasm workloads (Beta)
-weight: 20
+title: Wasm workloads
+weight: 90
description: How to run Wasm workloads with Docker Desktop
keywords: Docker, WebAssembly, wasm, containerd, engine
toc_max: 3
aliases:
- /desktop/wasm/
+params:
+  sidebar:
+    badge:
+      color: blue
+      text: Beta
---

{{< summary-bar feature_name="Wasm workloads" >}}

-Wasm (short for WebAssembly) is a fast, light alternative to the Linux and
-Windows containers you’re using in Docker today (with
-[some tradeoffs](https://www.docker.com/blog/docker-wasm-technical-preview/)).
+WebAssembly (Wasm) is a fast, light alternative to Linux and
+Windows containers. With Docker Desktop, you can now run Wasm workloads side by side with traditional containers.

-This page provides information about the new ability to run Wasm applications
+This page provides information about the ability to run Wasm applications
alongside your Linux containers in Docker.

+> [!TIP]
+>
+> Learn more about Wasm use cases and tradeoffs in the [Docker Wasm technical preview blog post](https://www.docker.com/blog/docker-wasm-technical-preview/).
+
## Turn on Wasm workloads

Wasm workloads require the [containerd image store](containerd.md)
@@ -26,12 +34,10 @@ then pre-existing images and containers will be inaccessible.

1. Navigate to **Settings** in Docker Desktop.
2. 
In the **General** tab, check **Use containerd for pulling and storing images**.
3. Go to **Features in development** and check the **Enable Wasm** option.
-4. Select **Apply & restart** to save the settings.
+4. Select **Apply** to save the settings.
5. In the confirmation dialog, select **Install** to install the Wasm runtimes.

-Docker Desktop downloads and installs the following runtimes that you can use
-to run Wasm workloads:
-
+Docker Desktop downloads and installs the following runtimes:
- `io.containerd.slight.v1`
- `io.containerd.spin.v2`
- `io.containerd.wasmedge.v1`
@@ -87,7 +93,7 @@ Start the application using the normal Docker Compose commands:

### Running a multi-service application with Wasm

-Networking works the same as you expect with Linux containers, giving you the
+Networking works the same as you'd expect with Linux containers, giving you the
flexibility to combine Wasm applications with other containerized workloads,
such as a database, in a single application stack.

@@ -206,16 +212,5 @@ Update your Docker Desktop to the latest version and try again.

## Known issues

-- Docker Compose may not exit cleanly when interrupted
-  - Workaround: Clean up `docker-compose` processes by sending them a SIGKILL
-    (`killall -9 docker-compose`).
-- Pushes to Hub might give an error stating
-  `server message: insufficient_scope: authorization failed`, even after logging
-  in using Docker Desktop
-  - Workaround: Run `docker login` in the CLI
-
-## Feedback
-
-Thanks for trying out Wasm workloads with Docker. Give feedback or report any
-bugs you may find through the issues tracker on the
-[public roadmap item](https://github.com/docker/roadmap/issues/426).
+- Docker Compose might not exit cleanly when interrupted. As a workaround, clean up `docker-compose` processes by sending them a SIGKILL (`killall -9 docker-compose`).
+- Pushes to Docker Hub might give an error stating `server message: insufficient_scope: authorization failed`, even after signing in through Docker Desktop. As a workaround, run `docker login` in the CLI.
diff --git a/content/manuals/desktop/features/wsl/_index.md b/content/manuals/desktop/features/wsl/_index.md
index 74a5eae3e9d4..257032951deb 100644
--- a/content/manuals/desktop/features/wsl/_index.md
+++ b/content/manuals/desktop/features/wsl/_index.md
@@ -5,7 +5,7 @@ keywords: wsl, wsl2, installing wsl2, wsl installation, docker wsl2, wsl docker, tech preview, wsl install docker, install docker wsl, how to install docker in wsl
title: Docker Desktop WSL 2 backend on Windows
linkTitle: WSL
-weight: 90
+weight: 120
aliases:
- /docker-for-windows/wsl/
- /docker-for-windows/wsl-tech-preview/
@@ -23,7 +23,7 @@ Additionally, with WSL 2, the time required to start a Docker daemon after a col
Before you turn on the Docker Desktop WSL 2 feature, ensure you have:

-- At a minimum WSL version 1.1.3.0., but ideally the latest version of WSL to [avoid Docker Desktop not working as expected](best-practices.md).
+- At a minimum WSL version 2.1.5, but ideally the latest version of WSL to [avoid Docker Desktop not working as expected](best-practices.md).
- Met the Docker Desktop for Windows' [system requirements](/manuals/desktop/setup/install/windows-install.md#system-requirements).
- Installed the WSL 2 feature on Windows. For detailed instructions, refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install-win10).

@@ -48,7 +48,7 @@ Before you turn on the Docker Desktop WSL 2 feature, ensure you have:
5. 
From the **General** tab, select **Use WSL 2 based engine**. If you have installed Docker Desktop on a system that supports WSL 2, this option is turned on by default.
-6. Select **Apply & Restart**.
+6. Select **Apply**.

Now `docker` commands work from Windows using the new WSL 2 engine.

@@ -92,7 +92,7 @@ Docker Desktop does not require any particular Linux distributions to be install
```
If **WSL integrations** isn't available under **Resources**, Docker may be in Windows container mode. In your taskbar, select the Docker menu and then **Switch to Linux containers**.
-3. Select **Apply & Restart**.
+3. Select **Apply**.

> [!NOTE]
>
@@ -103,9 +103,19 @@ Docker Desktop does not require any particular Linux distributions to be install
>
> Note that Docker Desktop version 4.30 and later keeps using the `docker-desktop-data` distribution if it was already created by an earlier version of Docker Desktop and has not been freshly installed or factory reset.

+## WSL 2 security in Docker Desktop
+
+Docker Desktop’s WSL 2 integration operates within the existing security model of WSL and does not introduce additional security risks beyond standard WSL behavior.
+
+Docker Desktop runs within its own dedicated WSL distribution, `docker-desktop`, which has the same isolation properties as any other WSL distribution. The only interaction between Docker Desktop and other installed WSL distributions occurs when the Docker Desktop **WSL integration** feature is enabled in settings. This feature provides easy access to the Docker CLI from integrated distributions.
+
+WSL is designed to facilitate interoperability between Windows and Linux environments. Its file system is accessible from the Windows host at `\\wsl$`, meaning Windows processes can read and modify files within WSL. This behavior is not specific to Docker Desktop, but rather a core aspect of WSL itself.
+
+For organizations that are concerned about WSL security risks and want stricter isolation and security controls, run Docker Desktop in Hyper-V mode instead of WSL 2. Alternatively, run your container workloads with [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) enabled.
## Additional resources

- [Explore best practices](best-practices.md)
- [Understand how to develop with Docker and WSL 2](use-wsl.md)
- [Learn about GPU support with WSL 2](/manuals/desktop/features/gpu.md)
+- [Custom kernels on WSL](custom-kernels.md)
diff --git a/content/manuals/desktop/features/wsl/best-practices.md b/content/manuals/desktop/features/wsl/best-practices.md
index 393604b2cede..9645f0ac533e 100644
--- a/content/manuals/desktop/features/wsl/best-practices.md
+++ b/content/manuals/desktop/features/wsl/best-practices.md
@@ -7,7 +7,7 @@ aliases:
- /desktop/wsl/best-practices/
---

-- Always use the latest version of WSL. At a minimum you must use WSL version 1.1.3.0., otherwise Docker Desktop may not work as expected. Testing, development, and documentation is based on the newest kernel versions. Older versions of WSL can cause:
+- Always use the latest version of WSL. At a minimum, you must use WSL version 2.1.5, otherwise Docker Desktop might not work as expected. Testing, development, and documentation are based on the newest kernel versions. 
Older versions of WSL can cause:
- Docker Desktop to hang periodically or when upgrading
- Deployment via SCCM to fail
- The `vmmem.exe` process to consume all memory
diff --git a/content/manuals/desktop/features/wsl/custom-kernels.md b/content/manuals/desktop/features/wsl/custom-kernels.md
new file mode 100644
index 000000000000..0fd8fcf01867
--- /dev/null
+++ b/content/manuals/desktop/features/wsl/custom-kernels.md
@@ -0,0 +1,32 @@
+---
+title: Custom kernels on WSL
+description: Using custom kernels with Docker Desktop on WSL 2
+keywords: wsl, docker desktop, custom kernel
+tags: [Best practices, troubleshooting]
+---
+
+Docker Desktop depends on several kernel features built into the default
+WSL 2 Linux kernel distributed by Microsoft. Consequently, using a
+custom kernel with Docker Desktop on WSL 2 is not officially supported
+and might cause issues with Docker Desktop startup or operation.
+
+However, in some cases it might be necessary
+to run custom kernels. Docker Desktop does not block their use, and
+some users have reported success using them.
+
+If you choose to use a custom kernel, it is recommended you start
+from the kernel tree that Microsoft distributes in their [official
+repository](https://github.com/microsoft/WSL2-Linux-Kernel) and then add
+the features you need on top of that.
+
+It's also recommended that you:
+- Use the same kernel version as the one distributed by the latest WSL 2
+release. You can find the version by running `wsl.exe --system uname -r`
+in a terminal.
+- Start from the default kernel configuration as provided by Microsoft
+in their [repository](https://github.com/microsoft/WSL2-Linux-Kernel)
+and add the features you need on top of that.
+- Make sure that your kernel build environment includes `pahole` and
+that its version is properly reflected in the corresponding kernel config
+(`CONFIG_PAHOLE_VERSION`). 
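+
+As a rough sketch of that workflow (the `make` invocation is an assumption
+based on the build instructions in Microsoft's repository; adjust it for your
+kernel version and environment):
+
+```console
+# Find the kernel version shipped with your current WSL release
+$ wsl.exe --system uname -r
+
+# Build from Microsoft's tree using the default WSL configuration
+$ git clone --depth 1 https://github.com/microsoft/WSL2-Linux-Kernel.git
+$ cd WSL2-Linux-Kernel
+$ make -j$(nproc) KCONFIG_CONFIG=Microsoft/config-wsl
+```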
+ diff --git a/content/manuals/desktop/images/build-ui-active-builds.webp b/content/manuals/desktop/images/build-ui-active-builds.webp deleted file mode 100644 index 792f71d92fe2..000000000000 Binary files a/content/manuals/desktop/images/build-ui-active-builds.webp and /dev/null differ diff --git a/content/manuals/desktop/images/build-ui-error.webp b/content/manuals/desktop/images/build-ui-error.webp deleted file mode 100644 index bdc5ac7c4e3e..000000000000 Binary files a/content/manuals/desktop/images/build-ui-error.webp and /dev/null differ diff --git a/content/manuals/desktop/images/build-ui-history.webp b/content/manuals/desktop/images/build-ui-history.webp deleted file mode 100644 index 4b2ac628c91a..000000000000 Binary files a/content/manuals/desktop/images/build-ui-history.webp and /dev/null differ diff --git a/content/manuals/desktop/images/build-ui-manage-builders.webp b/content/manuals/desktop/images/build-ui-manage-builders.webp deleted file mode 100644 index 29f86e56305c..000000000000 Binary files a/content/manuals/desktop/images/build-ui-manage-builders.webp and /dev/null differ diff --git a/content/manuals/desktop/images/build-ui-platform-menu.webp b/content/manuals/desktop/images/build-ui-platform-menu.webp deleted file mode 100644 index 8242889503b0..000000000000 Binary files a/content/manuals/desktop/images/build-ui-platform-menu.webp and /dev/null differ diff --git a/content/manuals/desktop/images/build-ui-timing-chart.webp b/content/manuals/desktop/images/build-ui-timing-chart.webp deleted file mode 100644 index f0374088a693..000000000000 Binary files a/content/manuals/desktop/images/build-ui-timing-chart.webp and /dev/null differ diff --git a/content/manuals/desktop/images/builds-view.webp b/content/manuals/desktop/images/builds-view.webp deleted file mode 100644 index 56b807d49b22..000000000000 Binary files a/content/manuals/desktop/images/builds-view.webp and /dev/null differ diff --git a/content/manuals/desktop/images/dashboard.png b/content/manuals/desktop/images/dashboard.png new file mode 100644 index 000000000000..904ff4dcd887 Binary files /dev/null and b/content/manuals/desktop/images/dashboard.png differ diff --git a/content/manuals/desktop/images/dashboard.webp b/content/manuals/desktop/images/dashboard.webp deleted file mode 100644 index ac3346c820ca..000000000000 Binary files a/content/manuals/desktop/images/dashboard.webp and /dev/null differ diff --git a/content/manuals/desktop/images/k8s-settings.png b/content/manuals/desktop/images/k8s-settings.png new file mode 100644 index 000000000000..aa8882b76473 Binary files /dev/null and b/content/manuals/desktop/images/k8s-settings.png differ diff --git a/content/manuals/desktop/images/notifications.svg b/content/manuals/desktop/images/notifications.svg deleted file mode 100644 index 45a9e4904f39..000000000000 --- a/content/manuals/desktop/images/notifications.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/content/manuals/desktop/previous-versions/2.x-mac.md b/content/manuals/desktop/previous-versions/2.x-mac.md index d1e9b00fb78b..a582c9a6aeb9 100644 --- a/content/manuals/desktop/previous-versions/2.x-mac.md +++ b/content/manuals/desktop/previous-versions/2.x-mac.md @@ -28,7 +28,7 @@ Docker Desktop 2.5.0.0 contains a Kubernetes upgrade. Your local Kubernetes clus ### New -- Users with a paid Docker subscription plan can now see the vulnerability scan report on the Remote repositories tab in Docker Desktop. 
+- Users with a paid Docker subscription can now see the vulnerability scan report on the Remote repositories tab in Docker Desktop. - Docker Desktop introduces a support option for users with a paid Docker subscription. ### Security diff --git a/content/manuals/desktop/previous-versions/3.x-mac.md b/content/manuals/desktop/previous-versions/3.x-mac.md index 3444a12d02f3..fe1ca74e4a2d 100644 --- a/content/manuals/desktop/previous-versions/3.x-mac.md +++ b/content/manuals/desktop/previous-versions/3.x-mac.md @@ -49,7 +49,7 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. ### Upgrades @@ -60,8 +60,8 @@ This page contains release notes for Docker Desktop for Mac 3.x. - Fixed network's IPAM configuration. Service can define a fixed IP. Fixes for [docker/compose-cli#1678](https://github.com/docker/compose-cli/issues/1678) and [docker/compose-cli#1816](https://github.com/docker/compose-cli/issues/1816) - Dev Environments - - Support VS Code Insiders. See [dev-environments#3](https://github.com/docker/dev-environments/issues/3) - - Allow users to specify a branch when cloning a project. See [dev-environments#11](https://github.com/docker/dev-environments/issues/11) + - Support VS Code Insiders. + - Allow users to specify a branch when cloning a project. ### Bug fixes and minor changes @@ -77,7 +77,7 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. **Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. 
The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. @@ -90,7 +90,7 @@ This page contains release notes for Docker Desktop for Mac 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. **Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. diff --git a/content/manuals/desktop/previous-versions/3.x-windows.md b/content/manuals/desktop/previous-versions/3.x-windows.md index b93c1c7435fa..8cc3a627f86b 100644 --- a/content/manuals/desktop/previous-versions/3.x-windows.md +++ b/content/manuals/desktop/previous-versions/3.x-windows.md @@ -56,7 +56,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. 
When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. ### Upgrades @@ -67,7 +67,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. - Fixed network's IPAM configuration. Service can define a fixed IP. Fixes for [docker/compose-cli#1678](https://github.com/docker/compose-cli/issues/1678) and [docker/compose-cli#1816](https://github.com/docker/compose-cli/issues/1816) - Dev Environments - - Support VS Code Insiders. See [dev-environments#3](https://github.com/docker/dev-environments/issues/3) + - Support VS Code Insiders. - Allow users to specify a branch when cloning a project. See [dev-environments#11](https://github.com/docker/dev-environments/issues/11) ### Bug fixes and minor changes @@ -84,7 +84,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. ### New -**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md). +**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. **Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository. @@ -98,7 +98,7 @@ This page contains release notes for Docker Desktop for Windows 3.x. 
### New

-**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts. For more information and for instructions on how to use Dev Environments, see [Development Environments Preview](/manuals/desktop/features/dev-environments/_index.md).
+**Dev Environments Preview**: Dev Environments enable you to seamlessly collaborate with your team members without moving between Git branches to get your code onto your team members' machines. When using Dev Environments, you can share your in-progress work with your team members in just one click, and without having to deal with any merge conflicts.

**Compose V2 beta**: Docker Desktop now includes the beta version of Compose V2, which supports the `docker compose` command as part of the Docker CLI. While `docker-compose` is still supported and maintained, Compose V2 implementation relies directly on the compose-go bindings which are maintained as part of the specification. The compose command in the Docker CLI supports most of the `docker-compose` commands and flags. It is expected to be a drop-in replacement for `docker-compose`. There are a few remaining flags that have yet to be implemented, see the [docker-compose compatibility list](/manuals/compose/releases/migrate.md) for more information about the flags that are supported in the new compose command. If you run into any problems with Compose V2, you can easily switch back to Compose v1 by either by making changes in Docker Desktop **Experimental** Settings, or by running the command `docker-compose disable-v2`. Let us know your feedback on the new ‘compose’ command by creating an issue in the [Compose-CLI](https://github.com/docker/compose-cli/issues) GitHub repository.

diff --git a/content/manuals/desktop/release-notes.md b/content/manuals/desktop/release-notes.md
index 77e393ef4eee..2320408ad876 100644
--- a/content/manuals/desktop/release-notes.md
+++ b/content/manuals/desktop/release-notes.md
@@ -17,16 +17,462 @@ aliases:
weight: 220
---

-This page contains information about the new features, improvements, known issues, and bug fixes in Docker Desktop releases. For frequently asked questions about Docker Desktop releases, see [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/releases.md).
+This page contains information about the new features, improvements, known issues, and bug fixes in Docker Desktop releases.
+
+Releases are gradually rolled out to ensure quality control. If the latest version is not yet available to you, allow some time. Updates typically become available within a week of the release date. Docker Desktop versions older than six months from the latest release are not available for download. Previous release notes are available in our [documentation repository](https://github.com/docker/docs/tree/main/content/manuals/desktop/previous-versions).

-Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projects/51/views/1?filterQuery=) to see what's coming next.
+For answers to frequently asked questions, see the [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/releases.md).

> [!WARNING]
>
> If you're experiencing malware detection issues on Mac, follow the steps documented in [docker/for-mac#7527](https://github.com/docker/for-mac/issues/7527). 
+## 4.43.2
+
+{{< release-date date="2025-07-15" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.2" build_path="/199162/" >}}
+
+### Upgrades
+
+- [Docker Compose v2.38.2](https://github.com/docker/compose/releases/tag/v2.38.2)
+- [Docker Engine v28.3.2](https://docs.docker.com/engine/release-notes/28/#2832)
+- Docker Model CLI v0.1.33
+
+## 4.43.1
+
+{{< release-date date="2025-07-04" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.1" build_path="/198352/" >}}
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed an issue that caused the Docker Desktop UI to break when Ask Gordon responses contained HTML tags.
+- Fixed an issue that prevented extensions from communicating with their backends.
+
+## 4.43.0
+
+{{< release-date date="2025-07-03" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.43.0" build_path="/198134/" >}}
+
+### New
+
+- [Compose Bridge](/manuals/compose/bridge/_index.md) is now generally available.
+
+### Upgrades
+
+- [Docker Buildx v0.25.0](https://github.com/docker/buildx/releases/tag/v0.25.0)
+- [Docker Compose v2.38.1](https://github.com/docker/compose/releases/tag/v2.38.1)
+- [Docker Engine v28.3.0](https://docs.docker.com/engine/release-notes/28/#2830)
+- [NVIDIA Container Toolkit v1.17.8](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.8)
+
+### Security
+
+- Fixed [CVE-2025-6587](https://www.cve.org/CVERecord?id=CVE-2025-6587) where sensitive system environment variables were included in Docker Desktop diagnostic logs, allowing for potential secret exposure.
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed a bug that caused `docker start` to drop the port mappings of an already running container.
+- Fixed a bug that prevented container ports from being displayed in the GUI when a container was restarted.
+- Fixed a bug that caused a Docker API `500 Internal Server Error for API route and version` error on application start.
+- The settings **Apply & restart** button is now labeled **Apply**. The VM is no longer restarted when applying changed settings.
+- Fixed a bug where the disk would be corrupted if Docker is shut down during an `fsck`.
+- Fixed a bug causing an incorrect `~/.kube/config` in WSL 2 when using a `kind` Kubernetes cluster.
+- Docker Desktop now returns an explicit error to Docker API or `docker` CLI commands if it has been manually paused.
+- Fixed an issue where unknown keys in Admin and Cloud settings caused a failure.
+
+#### For Mac
+
+- Removed `eBPF`, which blocked `io_uring`. To enable `io_uring` in a container, use `--security-opt seccomp=unconfined`. Fixes [docker/for-mac#7707](https://github.com/docker/for-mac/issues/7707).
+
+#### For Windows
+
+- Fixed an issue that caused the Docker Desktop installer to crash when the current user has no `SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall` registry key.
+- Fixed a bug where Docker Desktop could leak a `com.docker.build` process and fail to start. Fixes [docker/for-win#14840](https://github.com/docker/for-win/issues/14840).
+
+### Known issues
+
+#### For all platforms
+
+- `docker buildx bake` will not build images in Compose files with a top-level `models` attribute. Use `docker compose build` instead.
+- Gordon responses containing HTML can break the Docker Desktop UI permanently. As a workaround, you can delete the `persisted-state.json` file to reset the UI. 
The file is located in the following directories:
+  - Windows: `%APPDATA%\Docker Desktop\persisted-state.json`
+  - Linux: `$XDG_CONFIG_HOME/Docker Desktop/persisted-state.json` or `~/.config/Docker Desktop/persisted-state.json`
+  - Mac: `~/Library/Application Support/Docker Desktop/persisted-state.json`
+
+#### For Windows
+
+- Possible incompatibility between the "host networking" feature of Docker Desktop and the most recent WSL 2 Linux kernel. If you encounter such issues, downgrade WSL 2 to 2.5.7.
+
+## 4.42.1
+
+{{< release-date date="2025-06-18" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.42.1" build_path="/196648/" >}}
+
+### Upgrades
+
+- [Docker Compose v2.37.1](https://github.com/docker/compose/releases/tag/v2.37.1)
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed an issue where Docker domains were not reachable when the proxy configuration was not valid.
+- Fixed a possible deadlock when exposing ports.
+- Fixed a race condition that could cause `docker run -p` ports to disappear.
+
+#### For Mac
+
+- Fixed a bug where a container’s port list appeared empty when inspected immediately after it was created, for example, when using a script. [docker/for-mac#7693](https://github.com/docker/for-mac/issues/7693)
+
+#### For Windows
+
+- Disabled the Resource Saver mode in WSL 2 to prevent `docker` CLI commands from hanging in WSL 2 distros. [docker/for-win#14656](https://github.com/docker/for-win/issues/14656#issuecomment-2960285463)
+
+## 4.42.0
+
+{{< release-date date="2025-06-04" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Early Access" version="4.42.0" build_path="/195023/" >}}
+
+### New
+
+- Expanded network compatibility with IPv6 support.
+- The Docker MCP Toolkit is now natively integrated into Docker Desktop.
+- Docker Model Runner is now available for Windows systems running on Qualcomm/ARM GPUs.
+- Added a **Logs** tab to the Models view so you can see the inference engine output in real time.
+- Gordon now integrates the MCP Toolkit, providing access to 100+ MCP servers.
+
+### Upgrades
+
+- [Docker Buildx v0.24.0](https://github.com/docker/buildx/releases/tag/v0.24.0)
+- [Docker Engine v28.2.2](https://docs.docker.com/engine/release-notes/28/#2822)
+- [Compose Bridge v0.0.20](https://github.com/docker/compose-bridge-binaries/releases/tag/v0.0.20)
+- [Docker Compose v2.36.2](https://github.com/docker/compose/releases/tag/v2.36.2)
+- [NVIDIA Container Toolkit v1.17.7](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.7)
+- [Docker Scout CLI v1.18.0](https://github.com/docker/scout-cli/releases/tag/v1.18.0)
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Docker Desktop now accepts certificates with a negative serial number.
+- Re-enabled `seccomp` for containers by default. Use `docker run --security-opt seccomp=unconfined` to disable seccomp for a container.
+- Fixed a bug that caused Docker Desktop to hang when it ran out of memory.
+- Blocked `io_uring` syscalls in containers.
+- Added support for pulling models from Docker Hub directly, simplifying the process of accessing and using models.
+- Docker Desktop now sets the disk usage limit to the size of the physical disk on fresh installs and resets to defaults on Mac and Linux.
+- The maximum disk size in the settings UI now aligns with the full capacity of the host file system.
+- The **Models** view now has a **Docker Hub** tab that lists models under the `ai` namespace. 
+- Improved the sign-in enforcement message when more than 10 organizations are enforced.
+- Changed the way ports are mapped by Docker Desktop to fully support IPv6 ports.
+- Fixed a bug in the Dashboard container logs screen that caused the scrollbar to disappear as the mouse approached it.
+- Fixed [enforced sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) for Teams subscription users.
+- `llama.cpp` server now supports streaming and tool calling in Model Runner.
+- The sign-in enforcement capability is now available to all subscriptions.
+
+#### For Mac
+
+- Fixed a bug where the disk would always have a minimum usage limit of 64GB when using Docker VMM.
+- Disabled the memory protection keys mechanism in the Docker Desktop Linux VM. This caused VS Code Dev Containers to not work properly. See [docker/for-mac#7667](https://github.com/docker/for-mac/issues/7667).
+- Fixed persistent volume claims under Kubernetes. Fixes [docker/for-mac#7625](https://github.com/docker/for-mac/issues/7625).
+- Fixed a bug where the VM failed to start when using the Apple Virtualization framework.
+- The minimum macOS version for installing or updating Docker Desktop is now macOS Ventura 13.3.
+
+#### For Windows
+
+- Fixed a bug in Enhanced Container Isolation on Windows WSL, where files with hardlinks inside containers had `nobody:nogroup` ownership.
+- Fixed a bug that caused Docker Desktop to crash. Related to [docker/for-win#14782](https://github.com/docker/for-win/issues/14782).
+- Fixed a bug that caused a `The network name cannot be found` error when starting with WSL 2. Fixes [docker/for-win#14714](https://github.com/docker/for-win/issues/14714).
+- Fixed an issue where Docker Desktop would not remove entries in the hosts file when uninstalling.
+- Fixed an issue when reading the auto-start registry key for some system languages. Fixes [docker/for-win#14731](https://github.com/docker/for-win/issues/14731).
+- Fixed a bug where Docker Desktop was adding an unrecognized `crossDistro` option to `/etc/wsl.conf`, which caused WSL 2 to log an error. See [microsoft/WSL#4577](https://github.com/microsoft/WSL/issues/4577).
+- Fixed a bug where Docker Desktop failed to start on WSL 2.5.7 if another WSL distro was still using Linux cgroups v1. Fixes [docker/for-win#14801](https://github.com/docker/for-win/issues/14801).
+- Windows Subsystem for Linux (WSL) version 2.1.5 is now the minimum version required for the Docker Desktop application to function properly.
+
+### Known issues
+
+#### For all platforms
+
+- This release contains a regression with `docker port`, resulting in "No host port found for host IP" errors when using testcontainers-node. See [testcontainers/testcontainers-node#818](https://github.com/testcontainers/testcontainers-node/issues/818#issuecomment-2941575369).
+
+#### For Windows
+
+- Running containers with Wasm will hang sporadically. See [docker/for-mac#7666](https://github.com/docker/for-mac/issues/7666).
+- On some machines, Resource Saver causes other WSL 2 distros to freeze. As a workaround, disable Resource Saver. See [docker/for-win#14656](https://github.com/docker/for-win/issues/14656).
+
+## 4.41.2
+
+{{< release-date date="2025-05-06" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.41.2" build_path="/191736/" >}}
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed an issue where the `Models` menu was displayed in the GUI even when Docker Model Runner was not supported or not enabled. 
+
+## 4.41.1
+
+{{< release-date date="2025-04-30" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.41.1" build_path="/191279/" >}}
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed an issue where Docker Desktop failed to start when a proxy configuration was specified in the `admin-settings.json` file.
+
+#### For Windows
+
+- Fixed a possible conflict with third-party tools (for example, Ollama) by avoiding placing `llama.cpp` DLLs in a directory included in the system `PATH`.
+
+## 4.41.0
+
+{{< release-date date="2025-04-28" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.41.0" build_path="/190950/" >}}
+
+### New
+
+- Docker Model Runner is now available on x86 Windows machines with NVIDIA GPUs.
+- You can now [push models](/manuals/ai/model-runner.md#push-a-model-to-docker-hub) to Docker Hub with Docker Model Runner.
+- Added support for Docker Model Runner's model management and chat interface in Docker Desktop for Mac and Windows (on hardware supporting Docker Model Runner). Users can now view, interact with, and manage local AI models through a new dedicated interface.
+- [Docker Compose](/manuals/ai/compose/models-and-compose.md) and Testcontainers [Java](https://java.testcontainers.org/modules/docker_model_runner/) and [Go](https://golang.testcontainers.org/modules/dockermodelrunner/) now support Docker Model Runner.
+- Docker Desktop is now available in the [Microsoft App Store](https://apps.microsoft.com/detail/xp8cbj40xlbwkx?hl=en-GB&gl=GB).
+
+### Upgrades
+
+- [Docker Engine v28.1.1](https://docs.docker.com/engine/release-notes/28.1/#2811)
+- [Docker Compose v2.35.1](https://github.com/docker/compose/releases/tag/v2.35.1)
+- [Docker Buildx v0.23.0](https://github.com/docker/buildx/releases/tag/v0.23.0)
+- [Docker Scout CLI v1.17.1](https://github.com/docker/scout-cli/releases/tag/v1.17.1)
+- [Compose Bridge v0.0.19](https://github.com/docker/compose-bridge-binaries/releases/tag/v0.0.19)
+
+### Security
+
+- Fixed [CVE-2025-3224](https://www.cve.org/CVERecord?id=CVE-2025-3224) allowing an attacker with access to a user machine to perform an elevation of privilege when Docker Desktop updates.
+- Fixed [CVE-2025-4095](https://www.cve.org/CVERecord?id=CVE-2025-4095) where Registry Access Management (RAM) policies were not enforced when using a macOS configuration profile, allowing users to pull images from unapproved registries.
+- Fixed [CVE-2025-3911](https://www.cve.org/CVERecord?id=CVE-2025-3911) allowing an attacker with read access to a user's machine to obtain sensitive information from Docker Desktop log files, including environment variables configured for running containers.
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed a bug in Docker VMM that caused an excessive number of open file handles on the host.
+- Fixed an issue where Docker Desktop failed to start if the `admin-settings.json` file didn't contain the optional `configurationFileVersion` configuration.
+- Fixed a bug that caused outgoing UDP connections to be closed eagerly.
+- Enhanced the log reading experience with advanced search capabilities and container-level filtering, enabling quicker debugging and troubleshooting.
+- Improved error messages when downloading the Registry Access Management configuration.
+- If Docker can't bind an ICMPv4 socket, it now logs an error and continues rather than quitting. 
+- Enabled the memory protection keys mechanism in the Docker Desktop Linux VM, allowing containers like Oracle database images to run correctly.
+- Fixed a problem with containers accessing `/proc/sys/kernel/shm*` sysctls when [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) is enabled on Mac, Windows Hyper-V, or Linux.
+- Added the kernel module `nft_fib_inet`, required for running firewalld in a Linux container.
+- The macOS QEMU virtualization option will be deprecated on July 14, 2025.
+
+#### For Mac
+
+- Fixed a bug that caused high CPU usage. Fixes [docker/for-mac#7643](https://github.com/docker/for-mac/issues/7643).
+- Fixed a multi-arch build issue with Rosetta on M3 Macs.
+- Fixed an issue where the absence of the `/Library/Application Support/com.docker.docker/` directory could cause a failure to apply RAM policy restrictions.
+
+#### For Windows
+
+- The Windows `.exe` installer now includes improved handling of locked files. Fixes [docker/for-win#14299](https://github.com/docker/for-win/issues/14299) and [docker/for-win#14316](https://github.com/docker/for-win/issues/14316).
+- Fixed `Docker Desktop.exe` not showing version information after installation. Fixes [docker/for-win#14703](https://github.com/docker/for-win/issues/14703).
+
+### Known issues
+
+#### For all platforms
+
+- If you have enforced sign-in using `desktop.plist` (on macOS) or a registry key (on Windows) and also have a `registry.json`, sign-in will fail if the user belongs to an organization listed in `desktop.plist` or the registry key but not to any organizations specified in `registry.json`. To resolve this, remove the `registry.json` file.
+
+#### For Windows
+
+- If multiple organizations are specified in the `allowedOrgs` Windows registry key using a space-separated format, sign-in fails and the user is logged out. As a workaround, specify each organization on a separate line in the registry key value.
+
+## 4.40.0
+
+{{< release-date date="2025-03-31" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.40.0" build_path="/187762/" >}}
+
+### New
+
+- You can now pull, run, and manage AI models from Docker Hub directly in Docker Desktop with [Docker Model Runner (Beta)](/manuals/ai/model-runner.md). Currently available for Docker Desktop for Mac with Apple Silicon.
+
+### Upgrades
+
+- [Docker Buildx v0.22.0](https://github.com/docker/buildx/releases/tag/v0.22.0)
+- [Docker Compose v2.34.0](https://github.com/docker/compose/releases/tag/v2.34.0)
+- [Docker Engine v28.0.4](https://docs.docker.com/engine/release-notes/28/#2804)
+- [Docker Scout CLI v1.17.0](https://github.com/docker/scout-cli/releases/tag/v1.17.0)
+- [compose-bridge v0.0.18](https://github.com/docker/compose-bridge-binaries/releases/tag/v0.0.18)
+- [NVIDIA Container Toolkit v1.17.5](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.5)
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed a bug that caused `docker-proxy` to stop forwarding UDP datagrams to containers.
+- Fixed a bug that caused `docker-proxy` to close UDP connections to containers eagerly, resulting in the source address changing needlessly.
+- Fixed a race condition that prevented Docker Desktop Kubernetes from starting in some scenarios.
+- Improved the way ECI collects image digest info from a repository in environments where proxies are configured.
+- Users can now specify a timeout when generating a private Extension Marketplace using the new `--timeout` flag. 
+- Removed the unused internal helper tool `com.docker.admin` for Mac and Linux.
+
+#### For Mac
+
+- Fixed an issue where a stale directory cache in Docker VMM prevented detecting moved or new files.
+- Removed the Continue/Restart pop-up when the Time Machine utility is restricted.
+- Docker Desktop now allows Unix domain sockets to be shared with containers via `docker run -v /path/to/unix.sock:/unix.sock`. The full socket path must be specified in the bind-mount. See [for-mac/#483](https://github.com/docker/for-mac/issues/483).
+- Fixed a bug that caused `docker-credential-osxkeychain` and `docker-credential-desktop` to return malformed URIs when a token was stored for a server with a port specified.
+
+#### For Windows
+
+- The Windows MSI and `.exe` installers now disable Windows Containers by default when installing with the GUI.
+- Improved port-mapping throughput on WSL 2.
+
+### Known issues
+
+#### For Windows
+
+- Switching to Windows Containers while the privileged helper error message is displayed could cause an inconsistent state. As a workaround, quit Docker Desktop, change `UseWindowsContainers` to `false` in `settings-store.json`, and restart Docker Desktop.
+- After installation, `Docker Desktop.exe` does not contain the latest version information.
+
+## 4.39.0
+
+{{< release-date date="2025-03-05" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.39.0" build_path="/184744/" >}}
+
+### New
+
+- The [Docker Desktop CLI](/manuals/desktop/features/desktop-cli.md) is now generally available. You can now also print logs with the new `docker desktop logs` command.
+- Docker Desktop now supports the `--platform` flag on [`docker load`](/reference/cli/docker/image/load.md) and [`docker save`](/reference/cli/docker/image/save.md). This helps you import and export a subset of multi-platform images.
+
+### Upgrades
+
+- [Docker Compose v2.33.1](https://github.com/docker/compose/releases/tag/v2.33.1)
+- [Docker Buildx v0.21.1](https://github.com/docker/buildx/releases/tag/v0.21.1)
+- [Kubernetes v1.32.2](https://github.com/kubernetes/kubernetes/releases/tag/v1.32.2)
+- [Docker Engine v28.0.1](https://docs.docker.com/engine/release-notes/28/#2801)
+- [Docker Scout CLI v1.16.3](https://github.com/docker/scout-cli/releases/tag/v1.16.3)
+
+### Security
+
+- Fixed [CVE-2025-1696](https://www.cve.org/CVERecord?id=CVE-2025-1696) which could disclose proxy authentication credentials in plaintext in log files.
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Ask Gordon now offers deeper context on Docker images, containers, and volumes, delivers faster support, and enables more user actions via Docker Desktop and the Docker CLI.
+- Added support for multi-platform images by letting users pick a specific platform in `docker history`.
+- Fixed an issue that caused clients other than the CLI and Docker Desktop to see a delay of three seconds whenever a container with port-mappings exists. See [docker/for-mac#7575](https://github.com/docker/for-mac/issues/7575).
+- Fixed a bug in the ECI Docker socket permissions that caused it to sometimes block Docker socket mounts on containers with allowed images, or images derived from allowed images.
+- Fixed a bug that prevented Docker Desktop from entering Resource Saver mode again immediately after an engine restart.
+- Fixed an issue that caused Kubernetes clusters to stop working due to expired PKI certificates. 
+
+#### For Mac
+
+- Downgraded the Linux kernel to `v6.10.14` to fix a bug in OpenJDK that causes Java containers to terminate due to cgroups controller misidentification. See [docker/for-mac#7573](https://github.com/docker/for-mac/issues/7573).
+- Added `/usr/share/misc/usb.ids` in the root mount namespace to fix `usbip`.
+- Fixed an issue where the display of the CPU limit was capped at 8 when using Docker VMM.
+- Fixed an issue where startup hung and the `com.docker.backend` process consumed 100% of the CPU. See [docker/for-mac#6951](https://github.com/docker/for-mac/issues/6951).
+- Fixed a bug that caused all Java programs running on the M4 MacBook Pro to emit a SIGILL error. See [docker/for-mac#7583](https://github.com/docker/for-mac/issues/7583).
+- Blocked startup on macOS 15.4 beta 1 because starting VMs causes the host to crash. See the [macOS 15.4 release notes](https://developer.apple.com/documentation/macos-release-notes/macos-15_4-release-notes#Virtual-Machines).
+- Fixed an issue where the `myIpAddress` PAC file function retrieved the host IP from the wrong interface, causing incorrect proxy selection.
+
+#### For Windows
+
+- Fixed a bug that prevented `docker compose logs` from streaming when running apps in WSL.
+- Fixed a bug where Paketo buildpacks failed with Enhanced Container Isolation when Docker Desktop uses WSL.
+- Fixed a bug where the WSL 2 integration failed if WSL version 1 distributions were installed.
+- Fixed a bug that caused some CLI plugin updates to fail if WSL distributions were enabled.
+- Fixed a bug where Docker Desktop sign-in hung when using a PAC file for proxy configuration, causing a blurred UI and blocking access.
+
+#### For Linux
+
+- The **Software Updates** settings page now points to the latest available version.
+
+## 4.38.0
+
+{{< release-date date="2025-01-30" >}}
+
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.38.0" build_path="/181591/" >}}
+
+### New
+
+- Installing Docker Desktop via the PKG installer is now generally available.
+- Enforcing sign-in via configuration profiles is now generally available.
+- Docker Compose, Docker Scout, the Docker CLI, and Ask Gordon can now be updated independently of Docker Desktop and without a full restart (Beta).
+- The new [`update` command](/reference/cli/docker/desktop/update.md) has been added to the Docker Desktop CLI (Mac only). See the example after this list.
+- [Bake](/manuals/build/bake/_index.md) is now generally available, with support for entitlements and composable attributes.
+- You can now create [multi-node Kubernetes clusters](/manuals/desktop/settings-and-maintenance/settings.md#kubernetes) in Docker Desktop.
+- [Ask Gordon](/manuals/ai/gordon/_index.md) is more widely available. It is still in Beta.
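+A minimal sketch of the new command on Mac (no flags shown; behavior depends on your installed components):
+
+```console
+$ docker desktop update
+```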
+
+### Upgrades
+
+- [containerd v1.7.24](https://github.com/containerd/containerd/releases/tag/v1.7.24)
+- [Docker Buildx v0.20.1](https://github.com/docker/buildx/releases/tag/v0.20.1)
+- [Docker Compose v2.32.4](https://github.com/docker/compose/releases/tag/v2.32.4)
+- [Docker Engine v27.5.1](https://docs.docker.com/engine/release-notes/27/#2751)
+- [Docker Scout CLI v1.16.1](https://github.com/docker/scout-cli/releases/tag/v1.16.1)
+- [Runc v1.2.2](https://github.com/opencontainers/runc/releases/tag/v1.2.2)
+- [NVIDIA Container Toolkit v1.17.4](https://github.com/NVIDIA/nvidia-container-toolkit/releases/tag/v1.17.4)
+- [Kubernetes v1.31.4](https://github.com/kubernetes/kubernetes/releases/tag/v1.31.4)
+- Docker Debug `v0.0.38`
+
+### Bug fixes and enhancements
+
+#### For all platforms
+
+- Fixed a bug where access tokens generated by the `docker login` web flow could not be refreshed by Docker Desktop.
+- Fixed a bug where container creation via the Docker API using `curl` failed when [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) was enabled.
+- Fixed a bug where the RAM policy was not refreshed after the refresh period had elapsed.
+- Fixed a bug in Enhanced Container Isolation when mounting the Docker socket into a container, and then creating Docker containers with bind-mounts from within that container.
+- Fixed a discrepancy between the GUI and the CLI, where the GUI forced the `0.0.0.0` host IP in port-mappings. This caused default binding IPs configured through the Engine's `ip` flag, or through the bridge option `com.docker.network.bridge.host_binding_ipv4`, to be ignored.
+- Fixed a bug where the `pac` setting was ignored in `admin-settings.json`.
+- Build UI:
+  - Added a progress status when importing a build.
+  - Fixed a bug where users were unable to import builds.
+  - Fixed a bug where some builders using SSH endpoints were not skipped.
+
+#### For Mac
+
+- Fixed a bug in Docker VMM where bind-mounts from non-root volumes weren't working as expected.
+- Fixed an issue that caused startup failures on systems without IPv6. Fixes [docker/for-win#14298](https://github.com/docker/for-win/issues/14298).
+- Fixed a bug that caused Docker Desktop to hang. See [docker/for-mac#7493](https://github.com/docker/for-mac/issues/7493#issuecomment-2568594070).
+- Fixed an issue where the uninstaller failed if the settings file was missing.
+- Fixed a bug where config profiles deployed via Workspace One were ignored.
+
+#### For Windows
+
+- The Docker Desktop installer now presents a UAC prompt when launched.
+- Fixed an issue where Docker Desktop failed to start for data disks created with old WSL versions that shared the same identifier as other WSL distros.
+- Docker Desktop now restarts when WSL integration settings are changed. This ensures proper setup of WSL integration when using Enhanced Container Isolation.
+
+#### For Linux
+
+- Added support for gVisor networking. Users with an incompatible version of QEMU (8.x) stay on QEMU networking. Others are migrated automatically.
+
+### Deprecation
+
+#### For all platforms
+
+- Deprecated `com.docker.diagnose check|check-dot|check-hypervisor|detect-host-hypervisor`.
+
## 4.37.2

{{< release-date date="2025-01-09" >}}

@@ -49,14 +495,14 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-12-17" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.37.1" build_path="/178610/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.37.1" build_path="/178610/" >}}

### Bug fixes and enhancements

#### For all platforms

- Fixed an issue that caused the AI Catalog in Docker Hub to be unavailable in Docker Desktop.
-- Fixed an issue that caused Docker Desktop to panic with `index out of range [0] with length 0` when using [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md).
+- Fixed an issue that caused Docker Desktop to panic with `index out of range [0] with length 0` when using [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md).

### Known issues

@@ -68,7 +514,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-12-12" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.37.0" build_path="/178034/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.37.0" build_path="/178034/" >}}

### New

@@ -92,6 +538,11 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

- Fixed a bug where resetting default settings would also reset the CLI context.
- Fixed a bug where the Docker Desktop Dashboard would get out of sync with the Docker daemon after restarting the engine while in Resource Saver mode (Windows with WSL2 backend only) or after switching engines (macOS).
- Fixed a bug where Resource Saver mode would fail to re-engage after restarting the engine while in Resource Saver mode.
+- Build UI:
+  - Fixed a bug where the source file could not be found for some builds.
+  - Fixed a bug where error logs were not displayed in the **Source** tab.
+  - Fixed a bug where users had to scroll to the bottom for error logs in the **Source** tab.
+  - Fixed a bug where timestamps were broken in the **Logs** tab.

#### For Mac

@@ -141,19 +592,19 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-11-18" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.36.0" build_path="/175267/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.36.0" build_path="/175267/" >}}

### New

- Existing Docker Desktop installations using the WSL2 engine on Windows are now automatically migrated to a unified single-distribution architecture for enhanced consistency and performance.
- Administrators can now:
-  - Enforce sign-in with macOS [configuration profiles](/manuals/security/for-admins/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access).
+  - Enforce sign-in with macOS [configuration profiles](/manuals/enterprise/security/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access).
  - Enforce sign-in for more than one organization at a time (Early Access).
-  - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md) (Early Access).
+  - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md) (Early Access).
  - Use Desktop Settings Management to manage and enforce defaults via admin.docker.com (Early Access).
- Enhanced Container Isolation (ECI) has been improved to:
-  - Allow admins to [turn off Docker socket mount restrictions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket).
-  - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images).
+  - Allow admins to [turn off Docker socket mount restrictions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket).
+  - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images).

### Upgrades

@@ -178,6 +629,13 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

- Fixed a bug that restricted containers using `--network=host` to 18 open host ports.
- Fixed bind mount ownership for non-root containers. Fixes [docker/for-mac#6243](https://github.com/docker/for-mac/issues/6243).
- Docker Desktop will not unpause automatically after a manual pause. The system will stay paused until you manually resume the Docker engine. This fixes a bug where other software would accidentally trigger a resume by running a CLI command in the background. Fixes [for-mac/#6908](https://github.com/docker/for-mac/issues/6908).
+- Build UI:
+  - The **Source** tab now supports multiple source files.
+  - Links for image dependencies in the **Info** tab now support other well-known registries such as GitHub, Google, and GitLab.
+  - Disabled the **Delete** button if only cloud builds are selected.
+  - Fixed an issue where users were unable to delete builds.
+  - Fixed malformed Jaeger traces that were missing events and links.
+  - Fixed missing export attributes when building with the cloud driver.

#### For Mac

@@ -214,7 +672,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-10-30" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.35.1" build_path="/173168/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.35.1" build_path="/173168/" >}}

#### For all platforms

@@ -224,7 +682,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-10-24" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.35.0" build_path="/172550/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.35.0" build_path="/172550/" >}}

### New

@@ -255,7 +713,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

- Fixed a bug where the **Push to Docker Hub** action in the **Images** view would result in an `invalid tag format` error. Fixes [docker/for-win#14258](https://github.com/docker/for-win/issues/14258).
- Fixed an issue where Docker Desktop startup failed when ICMPv6 setup was not successful.
- Added drivers that allow USB/IP to work.
-- Fixed a bug in Enhanced Container Isolation (ECI) [Docker socket mount permissions for derived images](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) where it was incorrectly denying Docker socket mounts for some images when Docker Desktop uses the containerd image store.
+- Fixed a bug in Enhanced Container Isolation (ECI) [Docker socket mount permissions for derived images](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) where it was incorrectly denying Docker socket mounts for some images when Docker Desktop uses the containerd image store.
- Enable `NFT_NUMGEN`, `NFT_FIB_IPV4` and `NFT_FIB_IPV6` kernel modules.
- Build UI:
  - Highlight build check warnings in the **Completed builds** list.
@@ -263,7 +721,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec
  - Image tags added to **Build results** section under the **Info** tab.
- Improved efficiency of host-side disk utilization for fresh installations on Mac and Linux.
- Fixed a bug that prevented the sign-in enforcement popup from being triggered when the token expires.
-- Fixed a bug where containers would not be displayed in the GUI immediately after signing in when using [enforced sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md).
+- Fixed a bug where containers would not be displayed in the GUI immediately after signing in when using [enforced sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md).
- `settings.json` has been renamed to `settings-store.json`.
- The host networking feature no longer requires users to be signed in to use it.

@@ -318,7 +776,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-10-09" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.3" build_path="/170107/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.34.3" build_path="/170107/" >}}

### Upgrades

@@ -334,7 +792,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-09-12" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.2" build_path="/167172/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.34.2" build_path="/167172/" >}}

### Bug fixes and enhancements

@@ -351,7 +809,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-09-05" >}}

-{{< desktop-install-v2 win=true beta_win_arm=true version="4.34.1" build_path="/166053/" >}}
+{{< desktop-install-v2 win=true win_arm_release="Beta" version="4.34.1" build_path="/166053/" >}}

### Bug fixes and enhancements

@@ -363,15 +821,15 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-08-29" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.34.0" build_path="/165256/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.34.0" build_path="/165256/" >}}

### New

- [Host networking](/manuals/engine/network/drivers/host.md#docker-desktop) support on Docker Desktop is now generally available.
- If you authenticate via the CLI, you can now authenticate through a browser-based flow, removing the need for manual PAT generation.
- Windows now supports automatic reclamation of disk space in Docker Desktop for WSL2 installations [using a managed virtual hard disk](/manuals/desktop/features/wsl/best-practices.md).
-- Deploying Docker Desktop via the [MSI installer](/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md) is now generally available.
-- Two new methods to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) (windows registry key and `.plist` file) are now generally available.
+- Deploying Docker Desktop via the [MSI installer](/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md) is now generally available.
+- Two new methods to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) (Windows registry key and `.plist` file) are now generally available.
- Fresh installations of Docker Desktop now use the containerd image store by default.
- [Compose Bridge](/manuals/compose/bridge/_index.md) (Experimental) is now available from the Compose file viewer. Easily convert and deploy your Compose project to a Kubernetes cluster.

@@ -418,8 +876,8 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

> [!NOTE]
> Using `docker login` with an address that includes URL path segments is not a documented use case and is considered unsupported. The recommended usage is to specify only a registry hostname, and optionally a port, as the address for `docker login` (see the example after this list).
- When running `docker compose up` and Docker Desktop is in Resource Saver mode, the command is unresponsive. As a workaround, manually exit Resource Saver mode and Docker Compose becomes responsive again.
-- When [Enhanced Container Isolation (ECI)](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) is enabled, Docker Desktop may not enter Resource Saver mode. This will be fixed in a future Docker Desktop release.
-- The new [ECI Docker socket mount permissions for derived images](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images) feature does not yet work when Docker Desktop is configured with the **Use containerd for pulling and storing images**. This will be fixed in the next Docker Desktop release.
+- When [Enhanced Container Isolation (ECI)](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) is enabled, Docker Desktop might not enter Resource Saver mode. This will be fixed in a future Docker Desktop release.
+- The new [ECI Docker socket mount permissions for derived images](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images) feature does not yet work when Docker Desktop is configured with the **Use containerd for pulling and storing images** option. This will be fixed in the next Docker Desktop release.
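+For the `docker login` note above, a supported address is a registry hostname with an optional port; the hostname and port here are illustrative:
+
+```console
+$ docker login registry.example.com:5000
+```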
## 4.33.2

@@ -443,7 +901,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-07-31" >}}

-{{< desktop-install-v2 win=true beta_win_arm=true version="4.33.0" build_path="/161083/" >}}
+{{< desktop-install-v2 win=true win_arm_release="Beta" version="4.33.0" build_path="/161083/" >}}

### Bug fixes and enhancements

@@ -455,7 +913,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec

{{< release-date date="2024-07-25" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.33.0" build_path="/160616/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.33.0" build_path="/160616/" >}}

### New

@@ -547,7 +1005,7 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

{{< release-date date="2024-07-04" >}}

-{{< desktop-install-v2 all=true beta_win_arm=true version="4.32.0" build_path="/157355/" >}}
+{{< desktop-install-v2 all=true win_arm_release="Beta" version="4.32.0" build_path="/157355/" >}}

### New

@@ -570,7 +1028,7 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

- Improved instructions for `watch` in the Compose File Viewer.
- Added support for Golang projects that don't have dependencies in Docker Init. Addresses [docker/roadmap#611](https://github.com/docker/roadmap/issues/611).
-- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now lets admins set the default value to `ProxyEnableKerberosNTLM`.
+- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now lets admins set the default value of `ProxyEnableKerberosNTLM`.
- Removed a temporary compatibility fix for older versions of Visual Studio Code.
- Builds view:
  - Changed icon for imported build record to a "files" icon.
@@ -615,10 +1073,6 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

## 4.31.1

-{{< release-date date="2024-06-10" >}}
-
-{{< desktop-install win=true beta_win_arm=true version="4.31.1" build_path="/153621/" >}}
-
### Bug fixes and enhancements

#### For Windows

@@ -627,13 +1081,9 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

## 4.31.0

-{{< release-date date="2024-06-06" >}}
-
-{{< desktop-install all=true beta_win_arm=true version="4.31.0" build_path="/153195/" >}}
-
### New

-- [Air-Gapped Containers](/manuals/security/for-admins/hardened-desktop/air-gapped-containers.md) is now generally available.
+- [Air-Gapped Containers](/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md) is now generally available.
- Docker Compose File Viewer shows your Compose YAML with syntax highlighting and contextual links to relevant docs (Beta, progressive rollout).
- New Sidebar user experience.

@@ -657,7 +1107,7 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

- Added `proxyEnableKerberosNTLM` config to `settings.json` to enable fallback to basic proxy authentication if the Kerberos/NTLM environment is not properly set up.
- Fixed a bug where Docker Debug was not working properly with Enhanced Container Isolation enabled.
- Fixed a bug where UDP responses were not truncated properly.
-- Fixed a bug where the **Update** screen was hidden when using [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
+- Fixed a bug where the **Update** screen was hidden when using [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).
- Fixed a bug where proxy settings defined in `admin-settings.json` were not applied correctly on startup.
- Fixed a bug where the **Manage Synchronized file shares with Compose** toggle did not correctly reflect the state of the feature.
- Fixed a bug where a bind-mounted file modified on the host was not updated after the container restarts, when gRPC FUSE file sharing is used on macOS and on Windows with Hyper-V. Fixes [docker/for-mac#7274](https://github.com/docker/for-mac/issues/7274), [docker/for-win#14060](https://github.com/docker/for-win/issues/14060).
@@ -715,14 +1165,14 @@ For more information, see [microsoft/WSL#11794](https://github.com/microsoft/WSL

{{< release-date date="2024-05-06" >}}

-{{< desktop-install all=true beta_win_arm=true version="4.30.0" build_path="/149282/" >}}
+{{< desktop-install all=true win_arm_release="Beta" version="4.30.0" build_path="/149282/" >}}

### New

#### For all platforms

- Docker Desktop now supports [SOCKS5 proxies](/manuals/desktop/features/networking.md#socks5-proxy-support). Requires a Business subscription.
-- Added a new setting to manage the onboarding survey in [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
+- Added a new setting to manage the onboarding survey in [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).

#### For Windows

@@ -797,18 +1247,18 @@ This can be resolved by adding the user to the **docker-users** group. Before st

{{< release-date date="2024-04-08" >}}

-{{< desktop-install all=true beta_win_arm=true version="4.29.0" build_path="/145265/" >}}
+{{< desktop-install all=true win_arm_release="Beta" version="4.29.0" build_path="/145265/" >}}

### New

-- You can now enforce Rosetta usage via [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
-- [Docker socket mount restrictions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) with ECI is now generally available.
+- You can now enforce Rosetta usage via [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).
+- [Docker socket mount restrictions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) with ECI are now generally available.
- Docker Engine and CLI updated to [Moby 26.0](https://github.com/moby/moby/releases/tag/v26.0.0). This includes BuildKit 0.13, subvolume mounts, networking updates, and improvements to the containerd multi-platform image store UX.
- New and improved Docker Desktop error screens: swift troubleshooting, easy diagnostics uploads, and actionable remediation.
- Compose supports [Synchronized file shares (experimental)](/manuals/desktop/features/synchronized-file-sharing.md).
- New [interactive Compose CLI (experimental)](/manuals/compose/how-tos/environment-variables/envvars.md#compose_menu).
- Beta release of:
-  - Air-Gapped Containers with [Settings Management](/manuals/security/for-admins/hardened-desktop/air-gapped-containers/_index.md).
+  - Air-Gapped Containers with [Settings Management](/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md).
  - [Host networking](/manuals/engine/network/drivers/host.md#docker-desktop) in Docker Desktop.
  - [Docker Debug](use-desktop/container.md#integrated-terminal) for running containers.
  - [Volumes Backup & Share extension](use-desktop/volumes.md) functionality available in the **Volumes** tab.

@@ -879,7 +1329,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

### New

-- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now allows admins to set the default file-sharing implementation and specify which paths developer can add file shares to.
+- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now allows admins to set the default file-sharing implementation and specify which paths developers can add file shares to.
- Added support for `socks5://` HTTP and HTTPS proxy URLs when the [`SOCKS` proxy support beta feature](/manuals/desktop/features/networking.md) is enabled.
- Users can now filter volumes to see which ones are in use in the **Volumes** tab.

@@ -998,7 +1448,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

- Docker init now supports Java and is generally available to all users.
- [Synchronized File Shares](/manuals/desktop/features/synchronized-file-sharing.md) provides fast and flexible host-to-VM file sharing within Docker Desktop. Utilizing the technology behind [Docker’s acquisition of Mutagen](https://www.docker.com/blog/mutagen-acquisition/), this feature provides an alternative to virtual bind mounts that uses synchronized filesystem caches, improving performance for developers working with large codebases.
-- Organization admins can now [configure Docker socket mount permissions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md) when ECI is enabled.
+- Organization admins can now [configure Docker socket mount permissions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md) when ECI is enabled.
- [Containerd Image Store](/manuals/desktop/features/containerd.md) support is now generally available to all users.
- Get a debug shell into any container or image with the new [`docker debug` command](/reference/cli/docker/debug.md) (Beta).
- Organization admins, with a Docker Business subscription, can now configure a custom list of extensions with [Private Extensions Marketplace](/manuals/extensions/private-marketplace.md) enabled (Beta).
@@ -1099,7 +1549,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

### New

-- Administrators can now control access to beta and experimental features in the **Features in development** tab with [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
+- Administrators can now control access to beta and experimental features in the **Features in development** tab with [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).
- Introduced four new version update states in the footer.
- `docker init` (Beta) now supports PHP with Apache + Composer.
- The [**Builds** view](use-desktop/builds.md) is now GA. You can now inspect builds, troubleshoot errors, and optimize build speed.
@@ -1209,7 +1659,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

- Rosetta is now Generally Available for all users on macOS 13 or later. It provides faster emulation of Intel-based images on Apple Silicon.
To use Rosetta, see [Settings](/manuals/desktop/settings-and-maintenance/settings.md). Rosetta is enabled by default on macOS 14.1 and later.
- Docker Desktop now detects if a WSL version is out of date. If an outdated version of WSL is detected, you can allow Docker Desktop to automatically update the installation or you can manually update WSL outside of Docker Desktop.
- New installations of Docker Desktop for Windows now require a Windows version of 19044 or later.
-- Administrators now have the ability to control Docker Scout image analysis in [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
+- Administrators now have the ability to control Docker Scout image analysis in [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).

### Upgrades

@@ -1344,7 +1794,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

#### For Mac

-- Fixed a kernel panic on Apple silicon Macs with macOS version below 12.5. Fixes [docker/for-mac#6975](https://github.com/docker/for-mac/issues/6975).
+- Fixed a kernel panic on Apple Silicon Macs with macOS version earlier than 12.5. Fixes [docker/for-mac#6975](https://github.com/docker/for-mac/issues/6975).
- Fixed a bug where Docker Desktop failed to start if invalid directories were included in `filesharingDirectories`. Fixes [docker/for-mac#6980](https://github.com/docker/for-mac/issues/6980).
- Fixed a bug where the installer created root-owned directories. Fixes [docker/for-mac#6984](https://github.com/docker/for-mac/issues/6984).
- Fixed a bug where the installer failed to set up the Docker socket when `/Library/LaunchDaemons` was missing. Fixes [docker/for-mac#6967](https://github.com/docker/for-mac/issues/6967).
@@ -1472,7 +1922,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

#### For all platforms

-- [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) now lets you turn off Docker Extensions for your organisation.
+- [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) now lets you turn off Docker Extensions for your organization.
- Fixed a bug where turning on Kubernetes from the UI failed when the system was paused.
- Fixed a bug where turning on Wasm from the UI failed when the system was paused.
- Bind mounts are now shown when you [inspect a container](use-desktop/container.md).
@@ -2104,7 +2554,7 @@ This can be resolved by adding the user to the **docker-users** group. Before st

### New

-- Two new security features have been introduced for Docker Business users, Settings Management and Enhanced Container Isolation. Read more about Docker Desktop’s new [Hardened Docker Desktop security model](/manuals/security/for-admins/hardened-desktop/_index.md).
+- Two new security features have been introduced for Docker Business users: Settings Management and Enhanced Container Isolation. Read more about Docker Desktop’s new [Hardened Docker Desktop security model](/manuals/enterprise/security/hardened-desktop/_index.md).
- Added the new Dev Environments CLI `docker dev`, so you can create, list, and run Dev Envs via command line. Now it's easier to integrate Dev Envs into custom scripts.
- Docker Desktop can now be installed to any drive and folder using the `--installation-dir` flag (see the example after this list). Partially addresses [docker/roadmap#94](https://github.com/docker/roadmap/issues/94).
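+A minimal sketch of the `--installation-dir` flag, run from the folder containing the Windows installer; the target path is illustrative:
+
+```console
+$ "Docker Desktop Installer.exe" install --installation-dir="D:\Docker"
+```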
@@ -2801,7 +3251,7 @@ Installing Docker Desktop 4.5.0 from scratch has a bug which defaults Docker Des

### New

- Easy, Secure sign in with Auth0 and Single Sign-on
-  - Single Sign-on: Users with a Docker Business subscription can now configure SSO to authenticate using their identity providers (IdPs) to access Docker. For more information, see [Single Sign-on](../security/for-admins/single-sign-on/_index.md).
+  - Single Sign-on: Users with a Docker Business subscription can now configure SSO to authenticate using their identity providers (IdPs) to access Docker. For more information, see [Single Sign-on](/manuals/enterprise/security/single-sign-on/_index.md).
  - Signing in to Docker Desktop now takes you through the browser so that you get all the benefits of auto-filling from password managers.

### Upgrades
diff --git a/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md b/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md
index b985426334c2..8b7995d5ef47 100644
--- a/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md
+++ b/content/manuals/desktop/settings-and-maintenance/backup-and-restore.md
@@ -1,29 +1,29 @@
---
title: How to back up and restore your Docker Desktop data
+linkTitle: Backup and restore data
keywords: Docker Desktop, backup, restore, migration, reinstall, containers, images, volumes
weight: 20
aliases:
-  - /desktop/backup-and-restore.md
+  - /desktop/backup-and-restore/
---

-Use the following procedure to save and restore your images and container data. This is useful if you want to reset your VM disk or to move your Docker environment to a new
-computer, for example.
+Use this procedure to back up and restore your images and container data. This is useful if you want to reset your VM disk, move your Docker environment to a new computer, or recover from a failed Docker Desktop update or installation.

> [!IMPORTANT]
>
> If you use volumes or bind-mounts to store your container data, backing up your containers might not be needed, but make sure to remember the options that were used when creating the container or use a [Docker Compose file](/reference/compose-file/_index.md) if you want to re-create your containers with the same configuration after re-installation.

-## Save your data
+## If Docker Desktop is functioning normally
+
+### Save your data

1. Commit your containers to an image with [`docker container commit`](/reference/cli/docker/container/commit.md).

-   Committing a container stores the container filesystem changes and some of the
-   container's configuration, for example labels and environment-variables, as a local image. Be aware that environment variables may contain sensitive
-   information such as passwords or proxy-authentication, so care should be taken
-   when pushing the resulting image to a registry.
+   Committing a container stores filesystem changes and some container configuration, such as labels and environment variables, as a local image. Be aware that environment variables might contain sensitive
+   information, such as passwords or proxy authentication credentials, so take care when pushing the resulting image to a registry.

-   Also note that filesystem changes in volume that are attached to the
+   Also note that filesystem changes in volumes that are attached to the
   container are not included in the image, and must be backed up separately.
If you used a [named volume](/manuals/engine/storage/_index.md#more-details-about-mount-types) to store container data, such as databases, refer to the [back up, restore, or migrate data volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section.

@@ -31,24 +31,95 @@ computer, for example.

2. Use [`docker push`](/reference/cli/docker/image/push.md) to push any images you have built locally and want to keep to the [Docker Hub registry](/manuals/docker-hub/_index.md).

-   Make sure to configure the [repository's visibility as "private"](/manuals/docker-hub/repos/_index.md)
-   for images that should not be publicly accessible.
+   > [!TIP]
+   >
+   > [Set the repository visibility to private](/manuals/docker-hub/repos/_index.md) if your image includes sensitive content.

   Alternatively, use [`docker image save -o images.tar image1 [image2 ...]`](/reference/cli/docker/image/save.md)
-   to save any images you want to keep to a local tar file.
+   to save any images you want to keep to a local `.tar` file.

After backing up your data, you can uninstall the current version of Docker Desktop and [install a different version](/manuals/desktop/release-notes.md) or reset Docker Desktop to factory defaults.

-## Restore your data
+### Restore your data

-1. Use [`docker pull`](/reference/cli/docker/image/pull.md) to restore images
-   you pushed to Docker Hub.
+1. Load your images.

-   If you backed up your images to a local tar file, use [`docker image load -i images.tar`](/reference/cli/docker/image/load.md)
-   to restore previously saved images.
+   - If you pushed to Docker Hub:
+
+     ```console
+     $ docker pull <image>
+     ```
+
+   - If you saved a `.tar` file:
+
+     ```console
+     $ docker image load -i images.tar
+     ```

2. Re-create your containers if needed, using [`docker run`](/reference/cli/docker/container/run.md), or [Docker Compose](/manuals/compose/_index.md).

-Refer to the [backup, restore, or migrate data volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes) page in the storage section to restore volume data.
+To restore volume data, refer to [backup, restore, or migrate data volumes](/manuals/engine/storage/volumes.md#back-up-restore-or-migrate-data-volumes).
+
+## If Docker Desktop fails to start
+
+If Docker Desktop cannot launch and must be reinstalled, you can back up its VM disk and image data directly from disk. Docker Desktop must be fully stopped before backing up these files.
+
+{{< tabs >}}
+{{< tab name="Windows" >}}
+
+1. Back up Docker containers and images.
+
+   Back up the following file:
+
+   ```console
+   %LOCALAPPDATA%\Docker\wsl\data\docker_data.vhdx
+   ```
+
+   Copy it to a safe location.
+
+1. Back up WSL distributions.
+
+   If you're running any WSL Linux distributions, such as Ubuntu or Alpine, back them up using [Microsoft's guide](https://learn.microsoft.com/en-us/windows/wsl/faq#how-can-i-back-up-my-wsl-distributions-).
+
+1. Restore.
+
+   After reinstalling Docker Desktop, restore the `docker_data.vhdx` file to the same location and re-import your WSL distributions if needed.
+
+{{< /tab >}}
+{{< tab name="Mac" >}}
+
+1. Back up Docker containers and images.
+
+   Back up the following file:
+
+   ```console
+   ~/Library/Containers/com.docker.docker/Data/vms/0/data/Docker.raw
+   ```
+
+   Copy it to a safe location.
+
+1. Restore.
+
+   After reinstalling Docker Desktop, restore the `Docker.raw` file to the same location.
+
+{{< /tab >}}
+{{< tab name="Linux" >}}
+
+1. 
Back up Docker containers and images.

+   Back up the following file:
+
+   ```console
+   ~/.docker/desktop/vms/0/data/Docker.raw
+   ```
+
+   Copy it to a safe location.
+
+1. Restore.
+
+   After reinstalling Docker Desktop, restore the `Docker.raw` file to the same location.
+
+{{< /tab >}}
+{{< /tabs >}}
\ No newline at end of file
diff --git a/content/manuals/desktop/settings-and-maintenance/settings.md b/content/manuals/desktop/settings-and-maintenance/settings.md
index 9080ee5e2e85..720b68017aff 100644
--- a/content/manuals/desktop/settings-and-maintenance/settings.md
+++ b/content/manuals/desktop/settings-and-maintenance/settings.md
@@ -34,6 +34,8 @@ On the **General** tab, you can configure when to start Docker and specify other

- **Choose theme for Docker Desktop**. Choose whether you want to apply a **Light** or **Dark** theme to Docker Desktop. Alternatively you can set Docker Desktop to **Use system settings**.

+- **Configure shell completions**. Automatically edits your shell configuration and gives you word completion for commands, flags, and Docker objects (such as container and volume names) when you press `Tab` as you type into your terminal. For more information, see [Completion](/manuals/engine/cli/completion.md).
+
- **Choose container terminal**. Determines which terminal is launched when opening the terminal from a container.
If you choose the integrated terminal, you can run commands in a running container straight from the Docker Desktop Dashboard. For more information, see [Explore containers](/manuals/desktop/use-desktop/container.md).

@@ -44,6 +46,12 @@ If you choose the integrated terminal, you can run commands in a running contain

- {{< badge color=blue text="Mac only" >}}**Include VM in Time Machine backups**. Select to back up the Docker Desktop virtual machine. This option is turned off by default.

+- **Use containerd for pulling and storing images**.
+  Turns on the containerd image store.
+  This brings new features like faster container startup performance by lazy-pulling images,
+  and the ability to run Wasm applications with Docker.
+  For more information, see [containerd image store](/manuals/desktop/features/containerd.md).
+
- {{< badge color=blue text="Windows only" >}}**Expose daemon on tcp://localhost:2375 without TLS**. Check this option to enable legacy clients to connect to the Docker daemon. You must use this option with caution as exposing the daemon without TLS can result in remote code

@@ -54,12 +62,6 @@ If you choose the integrated terminal, you can run commands in a running contain

- {{< badge color=blue text="Windows only" >}}**Add the `*.docker.internal` names to the host's `/etc/hosts` file (Password required)**. Lets you resolve `*.docker.internal` DNS names from both the host and your containers.

-- **Use containerd for pulling and storing images**.
-  Turns on the containerd image store.
-  This brings new features like faster container startup performance by lazy-pulling images,
-  and the ability to run Wasm applications with Docker.
-  For more information, see [containerd image store](/manuals/desktop/features/containerd.md).
-
- {{< badge color=blue text="Mac only" >}} **Choose Virtual Machine Manager (VMM)**. Choose the Virtual Machine Manager for creating and managing the Docker Desktop Linux VM.
  - Select **Docker VMM** for the latest and most performant Hypervisor/Virtual Machine Manager. This option is available only on Apple Silicon Macs running macOS 12.5 or later and is currently in Beta.
> [!TIP]

@@ -81,14 +83,14 @@ If you choose the integrated terminal, you can run commands in a running contain
troubleshoot the application. Clear the checkbox to opt out. Docker might periodically prompt you for more information.

-- **Use Enhanced Container Isolation**. Select to enhance security by preventing containers from breaching the Linux VM. For more information, see [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md).
+- **Use Enhanced Container Isolation**. Select to enhance security by preventing containers from breaching the Linux VM. For more information, see [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md).

> [!NOTE]
>
> This setting is only available if you are signed in to Docker Desktop and have a Docker Business subscription.

- **Show CLI hints**. Displays CLI hints and tips when running Docker commands in the CLI. This is turned on by default. To turn CLI hints on or off from the CLI, set `DOCKER_CLI_HINTS` to `true` or `false` respectively.

-- **SBOM Indexing**. When this option is enabled, inspecting an image in Docker Desktop shows a **Start analysis** button that, when selected, analyzes the image with Docker Scout.
+- **Enable Scout image analysis**. When this option is enabled, inspecting an image in Docker Desktop shows a **Start analysis** button that, when selected, analyzes the image with Docker Scout.

- **Enable background SBOM indexing**. When this option is enabled, Docker Scout automatically analyzes images that you build or pull.

@@ -128,7 +130,7 @@ Advanced settings are:

- **Swap**. Configure swap file size as needed. The default is 1 GB.

-- **Virtual disk limit**. Specify the maximum size of the disk image.
+- **Disk usage limit**. Specify the maximum amount of disk space the engine can use.

- **Disk image location**. Specify the location of the Linux volume where containers and images are stored.

@@ -185,7 +187,7 @@ File share settings are:

- **Remove a Directory**. Select `-` next to the directory you want to remove.

-- **Apply & Restart** makes the directory available to containers using Docker's
+- **Apply** makes the directory available to containers using Docker's
   bind mount (`-v`) feature.

> [!TIP]
@@ -254,7 +256,7 @@ To set a different proxy for Docker Desktop, turn on **Manual proxy configuratio
upstream proxy URL of the form `http://proxy:port` or `https://proxy:port`.

To prevent developers from accidentally changing the proxy settings, see
-[Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md#what-features-can-i-configure-with-settings-management).
+[Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md#what-features-can-i-configure-with-settings-management).

The HTTPS proxy settings used for scanning images are set using the `HTTPS_PROXY` environment variable.

@@ -268,7 +270,7 @@ The HTTPS proxy settings used for scanning images are set using the `HTTPS_PROXY
> settings via the Docker CLI configuration file (`config.json`).
>
> To manage proxy configurations for Docker Desktop, configure the settings in
-> the Docker Desktop app or use [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md).
+> the Docker Desktop app or use [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md).
#### Proxy authentication

@@ -305,6 +307,8 @@ To enable Kerberos or NTLM proxy authentication you must pass the `--proxy-enabl

Docker Desktop uses a private IPv4 network for internal services such as a DNS server and an HTTP proxy. In case Docker Desktop's choice of subnet clashes with IPs in your environment, you can specify a custom subnet using the **Network** setting.

+On Windows and Mac, you can also set the default networking mode and DNS resolution behavior. For more information, see [Networking](/manuals/desktop/features/networking.md#networking-mode-and-dns-behaviour-for-mac-and-windows).
+
On Mac, you can also select the **Use kernel networking for UDP** setting. This lets you use a more efficient kernel networking path for UDP. This might not be compatible with your VPN software.

### WSL Integration

@@ -346,7 +350,7 @@ edit the file using your favorite text editor.

To see the full list of possible configuration options, see the
[dockerd command reference](/reference/cli/dockerd/).

-Select **Apply & Restart** to save your settings and restart Docker Desktop.
+Select **Apply** to save your settings.

## Builders

@@ -425,6 +429,10 @@ deploying your Docker workloads on Kubernetes.
To turn on Kubernetes support and install a standalone instance of Kubernetes
running as a Docker container, select **Enable Kubernetes**.

+With Docker Desktop version 4.38 and later, you can choose your cluster provisioning method:
+  - **Kubeadm** creates a single-node cluster. The Kubernetes version is set by Docker Desktop.
+  - **kind** creates a multi-node cluster. You can set the Kubernetes version and the number of nodes.
+
Select **Show system containers (advanced)** to view internal containers when using Docker commands.

@@ -450,6 +458,10 @@ when an update becomes available.

After downloading the update, select **Apply and Restart** to install the update. You can do this either through the Docker menu or in the **Updates** section in the Docker Desktop Dashboard.

+> [!TIP]
+>
+> With Docker Desktop version 4.38 and later, components of Docker Desktop, such as Docker Compose, Docker Scout, and the Docker CLI, can be updated independently without the need for a full restart. This feature is still in Beta.
+
## Extensions

Use the **Extensions** tab to:
@@ -460,13 +472,7 @@ Use the **Extensions** tab to:

For more information about Docker extensions, see [Extensions](/manuals/extensions/_index.md).

-## Features in development
-
-On the **Feature control** tab you can control your settings for **Beta features** and **Experimental features**.
-
-You can also sign up to the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/) from the **Features in development** tab.
-
-### Beta features
+## Beta features

Beta features provide access to future product functionality.
These features are intended for testing and feedback only as they might change
between releases without warning or be removed entirely from a future release.
Beta features must not be used in production environments. Docker doesn't offer
support for beta features.

-### Experimental features
-
-Experimental features provide early access to future product functionality.
-These features are intended for testing and feedback only as they may change
-between releases without warning or can be removed entirely from a future
-release. Experimental features must not be used in production environments.
-Docker does not offer support for experimental features.
+You can also sign up for the [Developer Preview program](https://www.docker.com/community/get-involved/developer-preview/) from the **Beta features** tab.

For a list of current experimental features in the Docker CLI, see [Docker CLI Experimental features](https://github.com/docker/cli/blob/master/experimental/README.md).

+> [!IMPORTANT]
+>
+> For Docker Desktop versions 4.41 and earlier, there is also an **Experimental features** tab on the **Features in development** page.
+>
+> As with beta features, experimental features must not be used in production environments. Docker does not offer support for experimental features.
+
## Notifications

Use the **Notifications** tab to turn on or turn off notifications for the following events:

- **Status updates on tasks and processes**
+- **Recommendations from Docker**
- **Docker announcements**
- **Docker surveys**

-By default, all notifications are turned on. You'll always receive error notifications and notifications about new Docker Desktop releases and updates.
+By default, all general notifications are turned on. You'll always receive error notifications and notifications about new Docker Desktop releases and updates.
+
+You can also [configure notification settings for Docker Scout-related issues](/manuals/scout/explore/dashboard.md#notification-settings).

-Notifications momentarily appear in the lower-right of the Docker Desktop Dashboard and then move to the **Notifications** drawer. To open the **Notifications** drawer, select {{< inline-image src="../images/notifications.svg" alt="notifications" >}}.
+Notifications momentarily appear in the lower-right of the Docker Desktop Dashboard and then move to the **Notifications** drawer, which you can access from the top-right of the Docker Desktop Dashboard.

## Advanced

On Mac, you can reconfigure your initial installation settings on the **Advanced** tab:

   ```
3. Save and close the file. Restart your shell to apply the changes to the PATH variable.

-- **Enable default Docker socket (Requires password)**. Creates `/var/run/docker.sock` which some third party clients may use to communicate with Docker Desktop. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#installing-symlinks).
+- **Allow the default Docker socket to be used (Requires password)**. Creates `/var/run/docker.sock`, which some third-party clients might use to communicate with Docker Desktop. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#installing-symlinks).

-- **Enable privileged port mapping (Requires password)**. Starts the privileged helper process which binds the ports that are between 1 and 1024. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#binding-privileged-ports).
+- **Allow privileged port mapping (Requires password)**. Starts the privileged helper process, which binds the ports between 1 and 1024. For more information, see [permission requirements for macOS](/manuals/desktop/setup/install/mac-permission-requirements.md#binding-privileged-ports).

-  For more information on each configuration and use case, see [Permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md).
+For more information on each configuration and use case, see [Permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md).
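+As an optional sanity check after allowing the default Docker socket, confirm the symlink exists; the exact output varies by system:
+
+```console
+$ ls -l /var/run/docker.sock
+```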
diff --git a/content/manuals/desktop/setup/allow-list.md b/content/manuals/desktop/setup/allow-list.md index 28b285919ecb..02d6dcf572da 100644 --- a/content/manuals/desktop/setup/allow-list.md +++ b/content/manuals/desktop/setup/allow-list.md @@ -1,12 +1,12 @@ --- description: A list of domain URLs required for Docker Desktop to function correctly within an organization. -keywords: Docker Desktop, allowlist, allow list, firewall, authentication URLs, analytics, +keywords: Docker Desktop, allowlist, allow list, firewall, authentication URLs, analytics title: Allowlist for Docker Desktop tags: [admin] linkTitle: Allowlist weight: 100 aliases: - - /desktop/allow-list/ + - /desktop/allow-list/ --- {{< summary-bar feature_name="Allow list" >}} @@ -15,20 +15,21 @@ This page contains the domain URLs that you need to add to a firewall allowlist ## Domain URLs to allow -| Domains | Description | -|---------|-------------| -|https://api.segment.io| Analytics | -|https://cdn.segment.com| Analytics | -|https://experiments.docker.com| A/B testing | -|https://notify.bugsnag.com| Error reports | -|https://sessions.bugsnag.com| Error reports | -|https://auth.docker.io| Authentication | -|https://cdn.auth0.com| Authentication | -|https://login.docker.com| Authentication | -|https://desktop.docker.com| Update | -|https://hub.docker.com| Docker Pull/Push | -|https://registry-1.docker.io| Docker Pull/Push | -|https://production.cloudflare.docker.com| Docker Pull/Push | -|https://docker-images-prod.r2.cloudflarestorage.com| Docker Pull/Push | -|https://docker-pinata-support.s3.amazonaws.com| Troubleshooting | -|https://api.dso.docker.com| Docker Scout service | +| Domains | Description | +| ------------------------------------------------------------------------------------ | -------------------------------------------- | +| https://api.segment.io | Analytics | +| https://cdn.segment.com | Analytics | +| https://notify.bugsnag.com | Error reports | +| https://sessions.bugsnag.com | Error reports | +| https://auth.docker.io | Authentication | +| https://cdn.auth0.com | Authentication | +| https://login.docker.com | Authentication | +| https://auth.docker.com | Authentication | +| https://desktop.docker.com | Update | +| https://hub.docker.com | Docker Hub | +| https://registry-1.docker.io | Docker Pull/Push | +| https://production.cloudflare.docker.com | Docker Pull/Push (Paid plans) | +| https://docker-images-prod.6aa30f8b08e16409b46e0173d6de2f56.r2.cloudflarestorage.com | Docker Pull/Push (Personal plan / Anonymous) | +| https://docker-pinata-support.s3.amazonaws.com | Troubleshooting | +| https://api.dso.docker.com | Docker Scout service | +| https://api.docker.com | New API | diff --git a/content/manuals/desktop/setup/install/_index.md b/content/manuals/desktop/setup/install/_index.md index 3b6650ef5cdf..44d7b98a8cc7 100644 --- a/content/manuals/desktop/setup/install/_index.md +++ b/content/manuals/desktop/setup/install/_index.md @@ -5,4 +5,5 @@ title: Install weight: 10 aliases: - /desktop/install/ +- /desktop/setup/install/ --- diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/_index.md b/content/manuals/desktop/setup/install/enterprise-deployment/_index.md deleted file mode 100644 index 6797126bab18..000000000000 --- a/content/manuals/desktop/setup/install/enterprise-deployment/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Enterprise deployment -weight: 50 -description: If you're an IT admin, learn how to deploy Docker Desktop at scale -keywords: msi, docker desktop, 
windows, installation, mac, pkg, enterprise -build: - render: never -aliases: -- /desktop/install/msi/ -- /desktop/setup/install/msi/ ---- \ No newline at end of file diff --git a/content/manuals/desktop/setup/install/linux/_index.md b/content/manuals/desktop/setup/install/linux/_index.md index 5cd8e9f3ab98..f034da1a6e1f 100644 --- a/content/manuals/desktop/setup/install/linux/_index.md +++ b/content/manuals/desktop/setup/install/linux/_index.md @@ -16,7 +16,7 @@ aliases: > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 -> employees OR more than $10 million USD in annual revenue) requires a [paid +> employees or more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). This page contains information about general system requirements, supported platforms, and instructions on how to install Docker Desktop for Linux. @@ -27,11 +27,11 @@ This page contains information about general system requirements, supported plat > >This means images and containers deployed on the Linux Docker Engine (before installation) are not available in Docker Desktop for Linux. > -> {{< accordion title=" What is the difference between Docker Desktop for Linux and Docker Engine?" >}} +> {{< accordion title=" Docker Desktop vs Docker Engine: What's the difference?" >}} > [!IMPORTANT] > -> For commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (exceeding 250 employees OR with annual revenue surpassing $10 million USD), a [paid subscription](https://www.docker.com/pricing/) is required. +> For commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (exceeding 250 employees or with annual revenue surpassing $10 million USD), a [paid subscription](https://www.docker.com/pricing/) is required. Docker Desktop for Linux provides a user-friendly graphical interface that simplifies the management of containers and services. It includes Docker Engine as this is the core technology that powers Docker containers. Docker Desktop for Linux also comes with additional features like Docker Scout and Docker Extensions. @@ -82,7 +82,7 @@ Docker CLI commands target Docker Desktop. On shutdown, Docker Desktop resets the current context to the `default` context. Use the `docker context ls` command to view what contexts are available on your -machine. The current context is indicated with an asterisk (`*`); +machine. The current context is indicated with an asterisk (`*`). ```console $ docker context ls @@ -114,7 +114,7 @@ Refer to the [Docker Context documentation](/manuals/engine/manage-resources/con ## Supported platforms -Docker provides `.deb` and `.rpm` packages from the following Linux distributions +Docker provides `.deb` and `.rpm` packages for the following Linux distributions and architectures: | Platform | x86_64 / amd64 | @@ -137,8 +137,8 @@ To install Docker Desktop successfully, your Linux host must meet the following - KVM virtualization support. Follow the [KVM virtualization support instructions](#kvm-virtualization-support) to check if the KVM kernel modules are enabled and how to provide access to the KVM device. - QEMU must be version 5.2 or later. We recommend upgrading to the latest version. - systemd init system. -- Gnome, KDE, or MATE Desktop environment. - - For many Linux distributions, the Gnome environment does not support tray icons. To add support for tray icons, you need to install a Gnome extension. 
For example, [AppIndicator](https://extensions.gnome.org/extension/615/appindicator-support/). +- GNOME, KDE, or MATE desktop environment. + - For many Linux distributions, the GNOME environment does not support tray icons. To add support for tray icons, you need to install a GNOME extension. For example, [AppIndicator](https://extensions.gnome.org/extension/615/appindicator-support/). - At least 4 GB of RAM. - Enable configuring ID mapping in user namespaces. See [File sharing](/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md#how-do-i-enable-file-sharing). Note that this is no longer required for Docker Desktop version 4.35 and later. - Recommended: [Initialize `pass`](/manuals/desktop/setup/sign-in.md#credentials-management-for-linux-users) for credentials management. diff --git a/content/manuals/desktop/setup/install/linux/archlinux.md b/content/manuals/desktop/setup/install/linux/archlinux.md index f981d9d136ce..06ea014d235e 100644 --- a/content/manuals/desktop/setup/install/linux/archlinux.md +++ b/content/manuals/desktop/setup/install/linux/archlinux.md @@ -11,6 +11,8 @@ aliases: - /desktop/install/linux/archlinux/ --- +{{< summary-bar feature_name="Docker Desktop Archlinux" >}} + > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 @@ -19,7 +21,7 @@ aliases: This page contains information on how to install, launch, and upgrade Docker Desktop on an Arch-based distribution. -{{< summary-bar feature_name="Docker Desktop Archlinux" >}} + ## Prerequisites @@ -46,7 +48,7 @@ To install Docker Desktop successfully, you must meet the [general system requir ## Launch Docker Desktop -{{< include "desktop-linux-launch.md" >}} +{{% include "desktop-linux-launch.md" %}} ## Next steps diff --git a/content/manuals/desktop/setup/install/linux/debian.md b/content/manuals/desktop/setup/install/linux/debian.md index 8311b7512c39..7938ea07b775 100644 --- a/content/manuals/desktop/setup/install/linux/debian.md +++ b/content/manuals/desktop/setup/install/linux/debian.md @@ -27,8 +27,7 @@ To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). - Have a 64-bit version of Debian 12. - For a Gnome Desktop environment, you must also install AppIndicator and KStatusNotifierItem [Gnome extensions](https://extensions.gnome.org/extension/615/appindicator-support/). - -- For non-Gnome Desktop environments, `gnome-terminal` must be installed: +- If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: ```console $ sudo apt install gnome-terminal @@ -43,7 +42,7 @@ Recommended approach to install Docker Desktop on Debian: 2. Download the latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). For checksums, see the [Release notes](/manuals/desktop/release-notes.md). -3. Install the package with apt as follows: +3. Install the package using `apt`: ```console $ sudo apt-get update @@ -61,7 +60,7 @@ Recommended approach to install Docker Desktop on Debian: By default, Docker Desktop is installed at `/opt/docker-desktop`. -There are a few post-install configuration steps done through the post-install script contained in the deb package. +The DEB package includes a post-install script that completes additional setup steps automatically.
The post-install script: @@ -72,7 +71,7 @@ The post-install script: ## Launch Docker Desktop -{{< include "desktop-linux-launch.md" >}} +{{% include "desktop-linux-launch.md" %}} ## Upgrade Docker Desktop diff --git a/content/manuals/desktop/setup/install/linux/fedora.md b/content/manuals/desktop/setup/install/linux/fedora.md index 205d4543a430..9f6c1dccecc4 100644 --- a/content/manuals/desktop/setup/install/linux/fedora.md +++ b/content/manuals/desktop/setup/install/linux/fedora.md @@ -25,15 +25,13 @@ This page contains information on how to install, launch and upgrade Docker Desk To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). -- Have a 64-bit version of Fedora 40 or Fedora 41. +- Have a 64-bit version of Fedora 41 or Fedora 42. +- For a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). +- If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: -Additionally, for a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). - -For non-GNOME desktop environments, `gnome-terminal` must be installed: - -```console -$ sudo dnf install gnome-terminal -``` + ```console + $ sudo dnf install gnome-terminal + ``` ## Install Docker Desktop @@ -51,7 +49,7 @@ To install Docker Desktop on Fedora: By default, Docker Desktop is installed at `/opt/docker-desktop`. -There are a few post-install configuration steps done through the post-install script contained in the RPM package. +The RPM package includes a post-install script that completes additional setup steps automatically. The post-install script: @@ -62,7 +60,7 @@ The post-install script: ## Launch Docker Desktop -{{< include "desktop-linux-launch.md" >}} +{{% include "desktop-linux-launch.md" %}} ## Upgrade Docker Desktop diff --git a/content/manuals/desktop/setup/install/linux/rhel.md b/content/manuals/desktop/setup/install/linux/rhel.md index 91baeee45de1..88f0ae7aad74 100644 --- a/content/manuals/desktop/setup/install/linux/rhel.md +++ b/content/manuals/desktop/setup/install/linux/rhel.md @@ -6,11 +6,6 @@ keywords: red hat, red hat enterprise linux, rhel, rpm, title: Install Docker Desktop on RHEL linkTitle: RHEL download-url-base: https://download.docker.com/linux/rhel -params: - sidebar: - badge: - color: green - text: New aliases: - /desktop/install/linux/rhel/ --- @@ -18,7 +13,7 @@ aliases: > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 -> employees OR more than $10 million USD in annual revenue) requires a [paid +> employees or more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). This page contains information on how to install, launch and upgrade Docker Desktop on a Red Hat Enterprise Linux (RHEL) distribution. @@ -31,57 +26,54 @@ To install Docker Desktop successfully, you must: - Have a 64-bit version of either RHEL 8 or RHEL 9. - Have a [Docker account](/manuals/accounts/create-account.md), as authentication is required for Docker Desktop on RHEL. 
-If you don't have `pass` installed, or it can't be installed, you must enable -[CodeReady Linux Builder (CRB) repository](https://access.redhat.com/articles/4348511) -and -[Extra Packages for Enterprise Linux (EPEL)](https://docs.fedoraproject.org/en-US/epel/). +- If `pass` is not installed, or it can't be installed, you must enable [CodeReady Linux Builder (CRB) repository](https://access.redhat.com/articles/4348511) and [Extra Packages for Enterprise Linux (EPEL)](https://docs.fedoraproject.org/en-US/epel/). {{< tabs group="os_version" >}} {{< tab name="RHEL 9" >}} -```console -$ sudo subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms -$ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm -$ sudo dnf install pass -``` + ```console + $ sudo subscription-manager repos --enable codeready-builder-for-rhel-9-$(arch)-rpms + $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + $ sudo dnf install pass + ``` {{< /tab >}} {{< tab name="RHEL 8" >}} -```console -$ sudo subscription-manager repos --enable codeready-builder-for-rhel-8-$(arch)-rpms -$ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm -$ sudo dnf install pass -``` + ```console + $ sudo subscription-manager repos --enable codeready-builder-for-rhel-8-$(arch)-rpms + $ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + $ sudo dnf install pass + ``` {{< /tab >}} {{< /tabs >}} -Additionally, for a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). You must also enable EPEL. +- For a GNOME desktop environment you must install AppIndicator and KStatusNotifierItem [GNOME extensions](https://extensions.gnome.org/extension/615/appindicator-support/). You must also enable EPEL. {{< tabs group="os_version" >}} {{< tab name="RHEL 9" >}} -```console -$ # enable EPEL as described above -$ sudo dnf install gnome-shell-extension-appindicator -$ sudo gnome-extensions enable appindicatorsupport@rgcjonas.gmail.com -``` + ```console + $ # enable EPEL as described above + $ sudo dnf install gnome-shell-extension-appindicator + $ sudo gnome-extensions enable appindicatorsupport@rgcjonas.gmail.com + ``` {{< /tab >}} {{< tab name="RHEL 8" >}} -```console -$ # enable EPEL as described above -$ sudo dnf install gnome-shell-extension-appindicator -$ sudo dnf install gnome-shell-extension-desktop-icons -$ sudo gnome-shell-extension-tool -e appindicatorsupport@rgcjonas.gmail.com -``` + ```console + $ # enable EPEL as described above + $ sudo dnf install gnome-shell-extension-appindicator + $ sudo dnf install gnome-shell-extension-desktop-icons + $ sudo gnome-shell-extension-tool -e appindicatorsupport@rgcjonas.gmail.com + ``` {{< /tab >}} {{< /tabs >}} -For non-GNOME desktop environments, `gnome-terminal` must be installed: +- If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: -```console -$ sudo dnf install gnome-terminal -``` + ```console + $ sudo dnf install gnome-terminal + ``` ## Install Docker Desktop @@ -101,19 +93,19 @@ To install Docker Desktop on RHEL: $ sudo dnf install ./docker-desktop-x86_64-rhel.rpm ``` -There are a few post-install configuration steps done through the post-install script contained in the RPM package. 
+The RPM package includes a post-install script that completes additional setup steps automatically. The post-install script: - Sets the capability on the Docker Desktop binary to map privileged ports and set resource limits. - Adds a DNS name for Kubernetes to `/etc/hosts`. - Creates a symlink from `/usr/local/bin/com.docker.cli` to `/usr/bin/docker`. - This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at`/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. + This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at `/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. - Creates a symlink from `/usr/libexec/qemu-kvm` to `/usr/local/bin/qemu-system-x86_64`. ## Launch Docker Desktop -{{< include "desktop-linux-launch.md" >}} +{{% include "desktop-linux-launch.md" %}} > [!IMPORTANT] > @@ -140,7 +132,7 @@ $ sudo dnf install ./docker-desktop--rhel.rpm ## Next steps -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Review [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. - Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. diff --git a/content/manuals/desktop/setup/install/linux/ubuntu.md b/content/manuals/desktop/setup/install/linux/ubuntu.md index 9b86c4b567a5..282bb8d81849 100644 --- a/content/manuals/desktop/setup/install/linux/ubuntu.md +++ b/content/manuals/desktop/setup/install/linux/ubuntu.md @@ -1,5 +1,5 @@ --- -description: Learn how to install, launch and upgrade Docker Desktop on Ubuntu. This +description: Learn how to install, launch, and upgrade Docker Desktop on Ubuntu. This quick guide will cover prerequisites, installation methods, and more. keywords: install docker ubuntu, ubuntu install docker, install docker on ubuntu, docker install ubuntu, how to install docker on ubuntu, ubuntu docker install, docker @@ -18,7 +18,7 @@ aliases: > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 -> employees OR more than $10 million USD in annual revenue) requires a [paid +> employees or more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). This page contains information on how to install, launch and upgrade Docker Desktop on an Ubuntu distribution. @@ -29,7 +29,7 @@ To install Docker Desktop successfully, you must: - Meet the [general system requirements](_index.md#general-system-requirements). - Have an x86-64 system with Ubuntu 22.04, 24.04, or the latest non-LTS version. 
-- For non-Gnome Desktop environments, `gnome-terminal` must be installed: +- If you're not using GNOME, you must install `gnome-terminal` to enable terminal access from Docker Desktop: ```console $ sudo apt install gnome-terminal ``` @@ -43,7 +43,7 @@ Recommended approach to install Docker Desktop on Ubuntu: 2. Download the latest [DEB package](https://desktop.docker.com/linux/main/amd64/docker-desktop-amd64.deb?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-linux-amd64). For checksums, see the [Release notes](/manuals/desktop/release-notes.md). -3. Install the package with apt as follows: +3. Install the package using `apt`: ```console $ sudo apt-get update @@ -61,22 +61,22 @@ Recommended approach to install Docker Desktop on Ubuntu: By default, Docker Desktop is installed at `/opt/docker-desktop`. -There are a few post-install configuration steps done through the post-install script contained in the deb package. +The DEB package includes a post-install script that completes additional setup steps automatically. The post-install script: - Sets the capability on the Docker Desktop binary to map privileged ports and set resource limits. - Adds a DNS name for Kubernetes to `/etc/hosts`. - Creates a symlink from `/usr/local/bin/com.docker.cli` to `/usr/bin/docker`. - This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at`/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. + This is because the classic Docker CLI is installed at `/usr/bin/docker`. The Docker Desktop installer also installs a Docker CLI binary that includes cloud-integration capabilities and is essentially a wrapper for the Compose CLI, at `/usr/local/bin/com.docker.cli`. The symlink ensures that the wrapper can access the classic Docker CLI. ## Launch Docker Desktop -{{< include "desktop-linux-launch.md" >}} +{{% include "desktop-linux-launch.md" %}} ## Upgrade Docker Desktop -Once a new version for Docker Desktop is released, the Docker UI shows a notification. +When a new version of Docker Desktop is released, the Docker UI shows a notification. Each time you want to upgrade Docker Desktop, download the new package and run: ```console @@ -85,8 +85,8 @@ $ sudo apt-get install ./docker-desktop-amd64.deb ## Next steps -- Explore [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. -- Take a look at the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. +- Review [Docker's subscriptions](https://www.docker.com/pricing/) to see what Docker can offer you. +- Follow the [Docker workshop](/get-started/workshop/_index.md) to learn how to build an image and run it as a containerized application. - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and all its features. - [Troubleshooting](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md) describes common problems, workarounds, how to run and submit diagnostics, and submit issues. - [FAQs](/manuals/desktop/troubleshoot-and-support/faqs/general.md) provide answers to frequently asked questions.
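As a quick post-install check, assuming Docker Desktop has been launched at least once, you can confirm that the CLI targets the Docker Desktop engine:

```console
# The desktop-linux context should be listed and selected
$ docker context ls

# Run a disposable container against the engine
$ docker run --rm hello-world
```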
diff --git a/content/manuals/desktop/setup/install/mac-install.md b/content/manuals/desktop/setup/install/mac-install.md index 6228a93becc7..292df0ccccd0 100644 --- a/content/manuals/desktop/setup/install/mac-install.md +++ b/content/manuals/desktop/setup/install/mac-install.md @@ -1,5 +1,5 @@ --- -description: Install Docker for Mac to get started. This guide covers system requirements, +description: Install Docker Desktop for Mac to get started. This guide covers system requirements, where to download, and instructions on how to install and update. keywords: docker for mac, install docker macos, docker mac, docker mac install, docker install macos, install docker on mac, install docker macbook, docker desktop for @@ -21,10 +21,10 @@ aliases: > **Docker Desktop terms** > > Commercial use of Docker Desktop in larger enterprises (more than 250 -> employees OR more than $10 million USD in annual revenue) requires a [paid +> employees or more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). -This page contains download URLs, information about system requirements, and instructions on how to install Docker Desktop for Mac. +This page provides download links, system requirements, and step-by-step installation instructions for Docker Desktop on Mac. {{< button text="Docker Desktop for Mac with Apple silicon" url="https://desktop.docker.com/mac/main/arm64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-arm64" >}} {{< button text="Docker Desktop for Mac with Intel chip" url="https://desktop.docker.com/mac/main/amd64/Docker.dmg?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-mac-amd64" >}} @@ -44,7 +44,7 @@ This page contains download URLs, information about system requirements, and ins > [!IMPORTANT] > - > Docker supports Docker Desktop on the most recent versions of macOS. That is, the current release of macOS and the previous two releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). + > Docker Desktop is supported on the current and two previous major macOS releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). - At least 4 GB of RAM. @@ -55,10 +55,10 @@ This page contains download URLs, information about system requirements, and ins > [!IMPORTANT] > - > Docker supports Docker Desktop on the most recent versions of macOS. That is, the current release of macOS and the previous two releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). + > Docker Desktop is supported on the current and two previous major macOS releases. As new major versions of macOS are made generally available, Docker stops supporting the oldest version and supports the newest version of macOS (in addition to the previous two releases). - At least 4 GB of RAM. -- For the best experience, it's recommended that you install Rosetta 2. There is no longer a hard requirement to install Rosetta 2, however there are a few optional command line tools that still require Rosetta 2 when using Darwin/AMD64. 
See [Known issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). To install Rosetta 2 manually from the command line, run the following command: +- For the best experience, it's recommended that you install Rosetta 2. Rosetta 2 is no longer strictly required, however there are a few optional command line tools that still require Rosetta 2 when using Darwin/AMD64. See [Known issues](/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md). To install Rosetta 2 manually from the command line, run the following command: ```console $ softwareupdate --install-rosetta @@ -111,13 +111,25 @@ $ sudo hdiutil detach /Volumes/Docker By default, Docker Desktop is installed at `/Applications/Docker.app`. As macOS typically performs security checks the first time an application is used, the `install` command can take several minutes to run. +#### Installer flags + The `install` command accepts the following flags: + +##### Installation behavior + - `--accept-license`: Accepts the [Docker Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement) now, rather than requiring it to be accepted when the application is first run. +- `--user=`: Performs the privileged configurations once during installation. This removes the need for the user to grant root privileges on first run. For more information, see [Privileged helper permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md#permission-requirements). To find the username, enter `ls /Users` in the CLI. + +##### Security and access + - `--allowed-org=`: Requires the user to sign in and be part of the specified Docker Hub organization when running the application -- `--user=`: Performs the privileged configurations once during installation. This removes the need for the user to grant root privileges on first run. For more information, see [Privileged helper permission requirements](/manuals/desktop/setup/install/mac-permission-requirements.md#permission-requirements). To find the username, enter `ls /Users` in the CLI. -- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by administrators to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by administrators to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - It must be used together with the `--allowed-org=` flag. - For example: `--allowed-org= --admin-settings="{'configurationFileVersion': 2, 'enhancedContainerIsolation': {'value': true, 'locked': false}}"` + +##### Proxy configuration + - `--proxy-http-mode=`: Sets the HTTP Proxy mode. The two modes are `system` (default) or `manual`. - `--override-proxy-http=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTP requests. It requires `--proxy-http-mode` to be `manual`.
- `--override-proxy-https=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTPS requests, requires `--proxy-http-mode` to be `manual` @@ -125,7 +137,7 @@ The `install` command accepts the following flags: > [!TIP] > -> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). > - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) > - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) > - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) diff --git a/content/manuals/desktop/setup/install/mac-permission-requirements.md b/content/manuals/desktop/setup/install/mac-permission-requirements.md index 9f24fe9d1ca7..20942720160b 100644 --- a/content/manuals/desktop/setup/install/mac-permission-requirements.md +++ b/content/manuals/desktop/setup/install/mac-permission-requirements.md @@ -3,6 +3,7 @@ description: Understand permission requirements for Docker Desktop for Mac and the differences between versions keywords: Docker Desktop, mac, security, install, permissions title: Understand permission requirements for Docker Desktop on Mac +linkTitle: Mac permission requirements aliases: - /docker-for-mac/privileged-helper/ - /desktop/mac/privileged-helper/ @@ -15,20 +16,17 @@ This page contains information about the permission requirements for running and It also provides clarity on running containers as `root` as opposed to having `root` access on the host. +Docker Desktop on Mac is designed with security in mind. Administrative rights are only required when absolutely necessary. + ## Permission requirements Docker Desktop for Mac is run as an unprivileged user. However, Docker Desktop needs to perform a limited set of privileged configurations, such as: - [Installing symlinks](#installing-symlinks) in `/usr/local/bin`. - - [Binding privileged ports](#binding-privileged-ports) that are less than 1024. The so-called "privileged ports" are not generally used as a security boundary, however operating systems still prevent unprivileged processes from binding them which breaks commands like `docker run -p 127.0.0.1:80:80 docker/getting-started`. + - [Binding privileged ports](#binding-privileged-ports) that are less than 1024. Although privileged ports (ports below 1024) are not typically used as a security boundary, operating systems still prevent unprivileged processes from binding to them, which breaks commands like `docker run -p 127.0.0.1:80:80 docker/getting-started`. - [Ensuring `localhost` and `kubernetes.docker.internal` are defined](#ensuring-localhost-and-kubernetesdockerinternal-are-defined) in `/etc/hosts`. Some old macOS installs don't have `localhost` in `/etc/hosts`, which causes Docker to fail.
Defining the DNS name `kubernetes.docker.internal` allows Docker to share Kubernetes contexts with containers. - Securely caching the Registry Access Management policy which is read-only for the developer. -Depending on which version of Docker Desktop for Mac is used, privileged access is granted either during installation, first run, or only when it's needed. - -{{< tabs >}} -{{< tab name="Version 4.18 and later" >}} - -From version 4.18 and later, Docker Desktop for Mac provides greater control over functionality that's enabled during installation. +Privileged access is granted during installation. The first time Docker Desktop for Mac launches, it presents an installation window where you can choose to either use the default settings, which work for most developers and require you to grant privileged access, or use advanced settings. @@ -41,101 +39,29 @@ Depending on which advanced settings you configure, you must enter your password You can change these configurations at a later date from the **Advanced** page in **Settings**. -{{< /tab >}} -{{< tab name="Version 4.15 - 4.17" >}} - -Versions 4.15 to 4.17 of Docker Desktop for Mac don't require the privileged process to run permanently. Whenever elevated privileges are needed for a configuration, Docker Desktop prompts you with information on the task it needs to perform. Most configurations are applied once, subsequent runs don't prompt for privileged access anymore. -The only time Docker Desktop may start the privileged process is for binding privileged ports that aren't allowed by default on the host OS. - -{{< /tab >}} -{{< tab name="Versions prior to 4.15" >}} - -Versions prior to 4.15 of Docker Desktop for Mac require `root` access to be granted on the first run. The first time that Docker Desktop launches you receive an admin prompt to grant permission for the installation of the `com.docker.vmnetd` privileged helper service. For subsequent runs, `root` privileges aren't required. Following the principle of least privilege, this approach allows `root` access to be used only for the operations for which it's absolutely necessary, while still being able to use Docker Desktop as an unprivileged user. -All privileged operations are run using the privileged helper process `com.docker.vmnetd`. - -{{< /tab >}} -{{< /tabs >}} - ### Installing symlinks The Docker binaries are installed by default in `/Applications/Docker.app/Contents/Resources/bin`. Docker Desktop creates symlinks for the binaries in `/usr/local/bin`, which means they're automatically included in `PATH` on most systems. -{{< tabs >}} -{{< tab name="Version 4.18 and later" >}} - -With version 4.18 and later, you can choose whether to install symlinks either in `/usr/local/bin` or `$HOME/.docker/bin` during installation of Docker Desktop. +You can choose whether to install symlinks either in `/usr/local/bin` or `$HOME/.docker/bin` during installation of Docker Desktop. -If `/usr/local/bin` is chosen, and this location is not writable by unprivileged users, Docker Desktop requires authorization to confirm this choice before the symlinks to Docker binaries are created in `/usr/local/bin`. If `$HOME/.docker/bin` is chosen, authorization is not required, but then you must [manually add `$HOME/.docker/bin`](/manuals/desktop/settings-and-maintenance/settings.md#advanced) to their PATH.
+If `/usr/local/bin` is chosen, and this location is not writable by unprivileged users, Docker Desktop requires authorization to confirm this choice before the symlinks to Docker binaries are created in `/usr/local/bin`. If `$HOME/.docker/bin` is chosen, authorization is not required, but then you must [manually add `$HOME/.docker/bin`](/manuals/desktop/settings-and-maintenance/settings.md#advanced) to your PATH. You are also given the option to enable the installation of the `/var/run/docker.sock` symlink. Creating this symlink ensures various Docker clients relying on the default Docker socket path work without additional changes. As `/var/run` is mounted as a tmpfs, its content is deleted on restart, symlink to the Docker socket included. To ensure the Docker socket exists after restart, Docker Desktop sets up a `launchd` startup task that creates the symlink by running `ln -s -f /Users//.docker/run/docker.sock /var/run/docker.sock`. This ensures you aren't prompted on each startup to create the symlink. If you don't enable this option at installation, the symlink and the startup task aren't created, and you might have to explicitly set the `DOCKER_HOST` environment variable to `/Users//.docker/run/docker.sock` in the clients that use it. The Docker CLI relies on the current context to retrieve the socket path. The current context is set to `desktop-linux` on Docker Desktop startup. -{{< /tab >}} -{{< tab name="Version 4.17 and earlier" >}} - -For versions prior to 4.18, installing symlinks in `/usr/local/bin` is a privileged configuration Docker Desktop performs on the first startup. Docker Desktop checks if symlinks exists and takes the following actions: -- Creates the symlinks without the admin prompt if `/usr/local/bin` is writable by unprivileged users. -- Triggers an admin prompt for you to authorize the creation of symlinks in `/usr/local/bin`. If you authorizes this, symlinks to Docker binaries are created in `/usr/local/bin`. If you reject the prompt, are not willing to run configurations requiring elevated privileges, or don't have admin rights on your machine, Docker Desktop creates the symlinks in `~/.docker/bin` and edits your shell profile to ensure this location is in your PATH. This requires all open shells to be reloaded. -The rejection is recorded for future runs to avoid prompting you again. -For any failure to ensure binaries are on your PATH, you may need to manually add to their PATH the `/Applications/Docker.app/Contents/Resources/bin` or use the full path to Docker binaries. - -A particular case is the installation of the `/var/run/docker.sock` symlink. Creating this symlink ensures various Docker clients relying on the default Docker socket path work without additional changes. As the `/var/run` is mounted as a tmpfs, its content is deleted on restart, symlink to Docker socket included. -To ensure the Docker socket exists after restart, Docker Desktop sets up a `launchd` startup task that creates a symlink by running `ln -s -f /Users//.docker/run/docker.sock /var/run/docker.sock`. This ensures that you are not prompted on each startup to create the symlink. If you reject the prompt, the symlink and the startup task are not created and you may have to explicitly set the `DOCKER_HOST` to `/Users//.docker/run/docker.sock` in the clients it is using. The Docker CLI relies on the current context to retrieve the socket path, the current context is set to `desktop-linux` on Docker Desktop startup.
- -{{< /tab >}} -{{< /tabs >}} - ### Binding privileged ports -{{< tabs >}} -{{< tab name="Version 4.18 and later" >}} - -With version 4.18 and later you can choose to enable privileged port mapping during installation, or from the **Advanced** page in **Settings** post-installation. Docker Desktop requires authorization to confirm this choice. - -{{< /tab >}} -{{< tab name="Version 4.17 and earlier" >}} - -For versions below 4.18 , if you run a container that requires binding privileged ports, Docker Desktop first attempts to bind it directly as an unprivileged process. If the OS prevents this and it fails, Docker Desktop checks if the `com.docker.vmnetd` privileged helper process is running to bind the privileged port through it. - -If the privileged helper process is not running, Docker Desktop prompts you for authorization to run it under [launchd](https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/CreatingLaunchdJobs.html). -This configures the privileged helper to run as in the versions of Docker Desktop prior to 4.15. However, the functionality provided by this privileged helper now only supports port binding and caching the Registry Access Management policy. -If you decline the launch of the privileged helper process, binding the privileged port cannot be done and the Docker CLI returns an error: -```console -$ docker run -p 127.0.0.1:80:80 docker/getting-started - -docker: Error response from daemon: Ports are not available: exposing port -TCP 127.0.0.1:80 -> 0.0.0.0:0: failed to connect to /var/run/com.docker.vmnetd.sock: -is vmnetd running?: dial unix /var/run/com.docker.vmnetd.sock: connect: connection -refused. -ERRO[0003] error waiting for container: context canceled -``` - -> [!NOTE] -> -> The command may fail with the same error if you take too long to authorize the prompt to start the helper process, as it may timeout. - -{{< /tab >}} -{{< /tabs >}} +You can choose to enable privileged port mapping during installation, or from the **Advanced** page in **Settings** post-installation. Docker Desktop requires authorization to confirm this choice. ### Ensuring `localhost` and `kubernetes.docker.internal` are defined -{{< tabs >}} -{{< tab name="Version 4.18 and later" >}} - -With versions 4.18 and later, it is your responsibility to ensure that localhost is resolved to `127.0.0.1` and if Kubernetes is used, that `kubernetes.docker.internal` is resolved to `127.0.0.1`. - -{{< /tab >}} -{{< tab name="Version 4.17 and earlier" >}} - -On first run, Docker Desktop checks if `localhost` is resolved to `127.0.0.1`. In case the resolution fails, it prompts you to allow adding the mapping to `/etc/hosts`. Similarly, when the Kubernetes cluster is installed, it checks that `kubernetes.docker.internal` is resolved to `127.0.0.1` and prompts you to do so. - -{{< /tab >}} -{{< /tabs >}} +It is your responsibility to ensure that localhost is resolved to `127.0.0.1` and if Kubernetes is used, that `kubernetes.docker.internal` is resolved to `127.0.0.1`. ## Installing from the command line -In version 4.11 and later of Docker Desktop for Mac, privileged configurations are applied during the installation with the `--user` flag on the [install command](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line). In this case, you are not prompted to grant root privileges on the first run of Docker Desktop. 
Specifically, the `--user` flag: +Privileged configurations are applied during the installation with the `--user` flag on the [install command](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line). In this case, you are not prompted to grant root privileges on the first run of Docker Desktop. Specifically, the `--user` flag: - Uninstalls the previous `com.docker.vmnetd` if present - Sets up symlinks - Ensures that `localhost` is resolved to `127.0.0.1` @@ -179,7 +105,7 @@ retain their original permissions. ## Enhanced Container Isolation In addition, Docker Desktop supports [Enhanced Container Isolation -mode](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), +mode](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) (ECI), available to Business customers only, which further secures containers without impacting developer workflows. diff --git a/content/manuals/desktop/setup/install/windows-install.md b/content/manuals/desktop/setup/install/windows-install.md index 53954a419bd2..87c90070cd78 100644 --- a/content/manuals/desktop/setup/install/windows-install.md +++ b/content/manuals/desktop/setup/install/windows-install.md @@ -28,10 +28,11 @@ aliases: > employees OR more than $10 million USD in annual revenue) requires a [paid > subscription](https://www.docker.com/pricing/). -This page contains the download URL, information about system requirements, and instructions on how to install Docker Desktop for Windows. +This page provides download links, system requirements, and step-by-step installation instructions for Docker Desktop on Windows. {{< button text="Docker Desktop for Windows - x86_64" url="https://desktop.docker.com/win/main/amd64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-win-amd64" >}} -{{< button text="Docker Desktop for Windows - Arm (Beta)" url="https://desktop.docker.com/win/main/arm64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-win-arm64" >}} +{{< button text="Docker Desktop for Windows - x86_64 on the Microsoft Store" url="https://apps.microsoft.com/detail/xp8cbj40xlbwkx?hl=en-GB&gl=GB" >}} +{{< button text="Docker Desktop for Windows - Arm (Early Access)" url="https://desktop.docker.com/win/main/arm64/Docker%20Desktop%20Installer.exe?utm_source=docker&utm_medium=webreferral&utm_campaign=docs-driven-download-win-arm64" >}} _For checksums, see [Release notes](/manuals/desktop/release-notes.md)_ @@ -41,12 +42,12 @@ _For checksums, see [Release notes](/manuals/desktop/release-notes.md)_ > > **Should I use Hyper-V or WSL?** > -> Docker Desktop's functionality remains consistent on both WSL and Hyper-V, without a preference for either architecture. Hyper-V and WSL have their own advantages and disadvantages, depending on your specific set up and your planned use case. +> Docker Desktop's functionality remains consistent on both WSL and Hyper-V, without a preference for either architecture. Hyper-V and WSL have their own advantages and disadvantages, depending on your specific setup and your planned use case. {{< tabs >}} {{< tab name="WSL 2 backend, x86_64" >}} -- WSL version 1.1.3.0 or later. +- WSL version 2.1.5 or later. - Windows 11 64-bit: Home or Pro version 22H2 or higher, or Enterprise or Education version 22H2 or higher. 
- Windows 10 64-bit: Minimum required is Home or Pro 22H2 (build 19045) or higher, or Enterprise or Education 22H2 (build 19045) or higher. - Turn on the WSL 2 feature on Windows. For detailed instructions, refer to the @@ -55,8 +56,8 @@ _For checksums, see [Release notes](/manuals/desktop/release-notes.md)_ WSL 2 on Windows 10 or Windows 11: - 64-bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - 4GB system RAM - - Enable hardware virtualization in BIOS. For more information, see - [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). + - Enable hardware virtualization in BIOS/UEFI. For more information, see + [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#docker-desktop-fails-due-to-virtualization-not-working). For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals/desktop/features/wsl/_index.md). @@ -80,8 +81,8 @@ For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals - 64 bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - 4GB system RAM - - Turn on BIOS-level hardware virtualization support in the - BIOS settings. For more information, see + - Turn on BIOS/UEFI-level hardware virtualization support in the + BIOS/UEFI settings. For more information, see [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). > [!NOTE] @@ -94,9 +95,9 @@ For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals > Windows Home or Education editions only allow you to run Linux containers. {{< /tab >}} -{{< tab name="WSL 2 backend, Arm (Beta)" >}} +{{< tab name="WSL 2 backend, Arm (Early Access)" >}} -- WSL version 1.1.3.0 or later. +- WSL version 2.1.5 or later. - Windows 11 64-bit: Home or Pro version 22H2 or higher, or Enterprise or Education version 22H2 or higher. - Windows 10 64-bit: Minimum required is Home or Pro 22H2 (build 19045) or higher, or Enterprise or Education 22H2 (build 19045) or higher. - Turn on the WSL 2 feature on Windows. For detailed instructions, refer to the @@ -105,7 +106,7 @@ For more information on setting up WSL 2 with Docker Desktop, see [WSL](/manuals WSL 2 on Windows 10 or Windows 11: - 64-bit processor with [Second Level Address Translation (SLAT)](https://en.wikipedia.org/wiki/Second_Level_Address_Translation) - 4GB system RAM - - Enable hardware virtualization in BIOS. For more information, see + - Enable hardware virtualization in BIOS/UEFI. For more information, see [Virtualization](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#virtualization). > [!IMPORTANT] @@ -157,6 +158,41 @@ again when you switch back. {{< /accordion >}} +## Administrator privileges and installation requirements + +Installing Docker Desktop requires administrator privileges. However, once installed, it can be used without administrative access. Some actions, though, still need elevated permissions. See [Understand permission requirements for Windows](./windows-permission-requirements.md) for more detail. + +If your users do not have administrator rights and plan to perform operations that require elevated privileges, be sure to install Docker Desktop using the `--always-run-service` installer flag. This ensures those actions can still be executed without prompting for User Account Control (UAC) elevation. 
See [Installer flags](#installer-flags) for more detail. + +## WSL: Verification and setup + +If you have chosen to use WSL, first verify that your installed version meets system requirements by running the following command in your terminal: + +```console +wsl --version +``` + +If version details do not appear, you are likely using the inbox version of WSL. This version does not support modern capabilities and must be updated. + +You can update or install WSL using one of the following methods: + +### Option 1: Install or update WSL via the terminal + +1. Open PowerShell or Windows Command Prompt in administrator mode. +2. Run either the install or update command. You might be prompted to restart your machine. For more information, refer to [Install WSL](https://learn.microsoft.com/en-us/windows/wsl/install). +```console +wsl --install + +wsl --update +``` + +### Option 2: Install WSL via the MSI package + +If Microsoft Store access is blocked due to security policies: +1. Go to the official [WSL GitHub Releases page](https://github.com/microsoft/WSL/releases). +2. Download the `.msi` installer from the latest stable release (under the Assets drop-down). +3. Run the downloaded installer and follow the setup instructions. + ## Install Docker Desktop on Windows > [!TIP] > @@ -171,15 +207,16 @@ again when you switch back. 3. When prompted, ensure the **Use WSL 2 instead of Hyper-V** option on the Configuration page is selected or not, depending on your choice of backend. - If your system only supports one of the two options, you won't be able to select which backend to use. + On systems that support only one backend, Docker Desktop automatically selects the available option. -4. Follow the instructions on the installation wizard to authorize the installer and proceed with the install. +4. Follow the instructions on the installation wizard to authorize the installer and proceed with the installation. 5. When the installation is successful, select **Close** to complete the installation process. 6. [Start Docker Desktop](#start-docker-desktop). -If your administrator account is different to your user account, you must add the user to the **docker-users** group: +If your administrator account is different from your user account, you must add the user to the **docker-users** group to access features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers: + 1. Run **Computer Management** as an **administrator**. 2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**. 3. Right-click to add the user to the group. @@ -207,40 +244,53 @@ start /w "" "Docker Desktop Installer.exe" install By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`. +#### Installer flags + +> [!NOTE] +> +> If you're using PowerShell, you need to use the `ArgumentList` parameter before any flags. +> For example: +> ```powershell +> Start-Process 'Docker Desktop Installer.exe' -Wait -ArgumentList 'install', '--accept-license' +> ``` + +If your admin account is different from your user account, you must add the user to the **docker-users** group to access features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers.
+ +```console +$ net localgroup docker-users /add +``` + The `install` command accepts the following flags: + +##### Installation behavior + - `--quiet`: Suppresses information output when running the installer - `--accept-license`: Accepts the [Docker Subscription Service Agreement](https://www.docker.com/legal/docker-subscription-service-agreement) now, rather than requiring it to be accepted when the application is first run -- `--no-windows-containers`: Disables the Windows containers integration -- `--allowed-org=`: Requires the user to sign in and be part of the specified Docker Hub organization when running the application -- `--backend=`: Selects the default backend to use for Docker Desktop, `hyper-v`, `windows` or `wsl-2` (default) - `--installation-dir=`: Changes the default installation location (`C:\Program Files\Docker\Docker`) -- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +- `--backend=`: Selects the default backend to use for Docker Desktop, `hyper-v`, `windows`, or `wsl-2` (default) +- `--always-run-service`: After installation completes, starts `com.docker.service` and sets the service startup type to Automatic. This circumvents the need for administrator privileges, which are otherwise necessary to start `com.docker.service`. `com.docker.service` is required by Windows containers and the Hyper-V backend. + +##### Security and access control + +- `--allowed-org=`: Requires the user to sign in and be part of the specified Docker Hub organization when running the application +- `--admin-settings`: Automatically creates an `admin-settings.json` file which is used by admins to control certain Docker Desktop settings on client machines within their organization. For more information, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). - It must be used together with the `--allowed-org=` flag. - For example: `--allowed-org= --admin-settings="{'configurationFileVersion': 2, 'enhancedContainerIsolation': {'value': true, 'locked': false}}"` +- `--no-windows-containers`: Disables the Windows containers integration. This can improve security. For more information, see [Windows containers](/manuals/desktop/setup/install/windows-permission-requirements.md#windows-containers). + +##### Proxy configuration - `--proxy-http-mode=`: Sets the HTTP Proxy mode, `system` (default) or `manual` - `--override-proxy-http=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTP requests, requires `--proxy-http-mode` to be `manual` - `--override-proxy-https=`: Sets the URL of the HTTP proxy that must be used for outgoing HTTPS requests, requires `--proxy-http-mode` to be `manual` - `--override-proxy-exclude=`: Bypasses proxy settings for the hosts and domains. Uses a comma-separated list. - `--proxy-enable-kerberosntlm`: Enables Kerberos and NTLM proxy authentication. If you are enabling this, ensure your proxy server is properly configured for Kerberos/NTLM authentication. Available with Docker Desktop 4.32 and later. + +##### Data root and disk location + - `--hyper-v-default-data-root=`: Specifies the default location for the Hyper-V VM disk. - `--windows-containers-default-data-root=`: Specifies the default location for the Windows containers.
- `--wsl-default-data-root=`: Specifies the default location for the WSL distribution disk. -- `--always-run-service`: After installation completes, starts `com.docker.service` and sets the service startup type to Automatic. This circumvents the need for administrator privileges, which are otherwise necessary to start `com.docker.service`. `com.docker.service` is required by Windows containers and Hyper-V backend. - -> [!NOTE] -> -> If you're using PowerShell, you need to use the `ArgumentList` parameter before any flags. -> For example: -> ```powershell -> Start-Process 'Docker Desktop Installer.exe' -Wait -ArgumentList 'install', '--accept-license' -> ``` - -If your admin account is different to your user account, you must add the user to the **docker-users** group: - -```console -$ net localgroup docker-users /add -``` ## Start Docker Desktop @@ -250,7 +300,7 @@ Docker Desktop does not start automatically after installation. To start Docker 2. The Docker menu ({{< inline-image src="images/whale-x.svg" alt="whale menu" >}}) displays the Docker Subscription Service Agreement. - {{< include "desktop-license-update.md" >}} + {{% include "desktop-license-update.md" %}} 3. Select **Accept** to continue. Docker Desktop starts after you accept the terms. @@ -260,7 +310,7 @@ Docker Desktop does not start automatically after installation. To start Docker > [!TIP] > -> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +> As an IT administrator, you can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). > - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) > - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) > - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) diff --git a/content/manuals/desktop/setup/install/windows-permission-requirements.md b/content/manuals/desktop/setup/install/windows-permission-requirements.md index aded11b07d40..d9ad5f9a249e 100644 --- a/content/manuals/desktop/setup/install/windows-permission-requirements.md +++ b/content/manuals/desktop/setup/install/windows-permission-requirements.md @@ -2,6 +2,7 @@ description: Understand permission requirements for Docker Desktop for Windows keywords: Docker Desktop, Windows, security, install title: Understand permission requirements for Windows +linkTitle: Windows permission requirements aliases: - /desktop/windows/privileged-helper/ - /desktop/windows/permission-requirements/ @@ -9,25 +10,38 @@ aliases: weight: 40 --- -This page contains information about the permission requirements for running and installing Docker Desktop on Windows, the functionality of the privileged helper process `com.docker.service` and the reasoning behind this approach. 
+This page contains information about the permission requirements for running and installing Docker Desktop on Windows, the functionality of the privileged helper process `com.docker.service`, and the reasoning behind this approach. It also provides clarity on running containers as `root` as opposed to having `Administrator` access on the host and the privileges of the Windows Docker engine and Windows containers.

+Docker Desktop on Windows is designed with security in mind. Administrative rights are only required when absolutely necessary.
+
 ## Permission requirements

-While Docker Desktop on Windows can be run without having `Administrator` privileges, it does require them during installation. On installation you receive a UAC prompt which allows a privileged helper service to be installed. After that, Docker Desktop can be run without administrator privileges, provided you are members of the `docker-users` group. If you performed the installation, you are automatically added to this group, but other users must be added manually. This allows the administrator to control who has access to Docker Desktop.
+While Docker Desktop on Windows can be run without having `Administrator` privileges, it does require them during installation. On installation you receive a UAC prompt which allows a privileged helper service to be installed. After that, Docker Desktop can be run without administrator privileges.
+
+Running Docker Desktop on Windows without the privileged helper does not require users to have `docker-users` group membership. However,
+some features that perform privileged operations do require membership of this group.
+
+If you performed the installation, you are automatically added to this group, but other users must be added manually. This allows the administrator to control who has access to features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers.

-The reason for this approach is that Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user.
+When Docker Desktop launches, all non-privileged named pipes are created so that only the following users can access them:
+- The user that launched Docker Desktop.
+- Members of the local `Administrators` group.
+- The `LOCALSYSTEM` account.

 ## Privileged helper

+Docker Desktop needs to perform a limited set of privileged operations which are conducted by the privileged helper process `com.docker.service`. This approach allows, following the principle of least privilege, `Administrator` access to be used only for the operations for which it is absolutely necessary, while still being able to use Docker Desktop as an unprivileged user.
+
 The privileged helper `com.docker.service` is a Windows service which runs in the background with `SYSTEM` privileges. It listens on the named pipe `//./pipe/dockerBackendV2`. The developer runs the Docker Desktop application, which connects to the named pipe and sends commands to the service. This named pipe is protected, and only users that are part of the `docker-users` group can have access to it.

 The service performs the following functionalities:

 - Ensuring that `kubernetes.docker.internal` is defined in the Win32 hosts file.
Defining the DNS name `kubernetes.docker.internal` allows Docker to share Kubernetes contexts with containers.
- Ensuring that `host.docker.internal` and `gateway.docker.internal` are defined in the Win32 hosts file. They point to the host local IP address and allow an application to resolve the host IP using the same name from either the host itself or a container.
- Securely caching the Registry Access Management policy which is read-only for the developer.
-- Creating the Hyper-V VM `"DockerDesktopVM"` and managing its lifecycle - starting, stopping and destroying it. The VM name is hard coded in the service code so the service cannot be used for creating or manipulating any other VMs.
+- Creating the Hyper-V VM `"DockerDesktopVM"` and managing its lifecycle - starting, stopping, and destroying it. The VM name is hardcoded in the service code so the service cannot be used for creating or manipulating any other VMs.
- Moving the VHDX file or folder.
- Starting and stopping the Windows Docker engine and querying whether it's running.
- Deleting all Windows containers data files.
@@ -38,7 +52,7 @@ The service performs the following functionalities:

 The service start mode depends on which container engine is selected, and, for WSL, on whether it is needed to maintain `host.docker.internal` and `gateway.docker.internal` in the Win32 hosts file. This is controlled by a setting under `Use the WSL 2 based engine` in the settings page. When this is set, the WSL engine behaves the same as Hyper-V. So:

- With Windows containers, or Hyper-V Linux containers, the service is started when the system boots and runs all the time, even when Docker Desktop isn't running. This is required so you can launch Docker Desktop without admin privileges.
-- With WSL2 Linux containers, the service isn't necessary and therefore doesn't run automatically when the system boots. When you switch to Windows containers or Hyper-V Linux containers, or choose to maintain `host.docker.internal` and `gateway.docker.internal` in the Win32 hosts file, a UAC prompt is displayed which asks you to accept the privileged operation to start the service. If accepted, the service is started and set to start automatically upon the next Windows boot.
+- With WSL2 Linux containers, the service isn't necessary and therefore doesn't run automatically when the system boots. When you switch to Windows containers or Hyper-V Linux containers, or choose to maintain `host.docker.internal` and `gateway.docker.internal` in the Win32 hosts file, a UAC prompt appears asking you to accept the privileged operation to start the service. If accepted, the service is started and set to start automatically upon the next Windows boot.

 ## Containers running as root within the Linux VM

@@ -49,13 +63,12 @@ installed software. This means that although containers run by default as
 access to the Windows host machine. The Linux VM serves as a security boundary
 and limits what resources from the host can be accessed. File sharing uses a
 user-space crafted file server and any directories from the host bind mounted
-into Docker containers still retain their original permissions. It doesn't give
-you access to any files that it doesn’t already have access to.
+into Docker containers still retain their original permissions. Containers don't have access to any host files beyond those explicitly shared.
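+
+For example, you can observe this by bind mounting a shared host directory and listing it from inside a container. A minimal sketch (the host path is illustrative):
+
+```console
+$ docker run --rm -v C:\Users\molly\data:/data alpine ls -la /data
+```
+
+Files under `/data` keep the ownership and permission bits they have on the host, and paths that aren't shared are simply not visible to the container.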
## Enhanced Container Isolation

 In addition, Docker Desktop supports [Enhanced Container Isolation
-mode](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md) (ECI),
+mode](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md) (ECI),
 available to Business customers only, which further secures containers without
 impacting developer workflows.

@@ -65,9 +78,13 @@ Desktop VM. ECI uses this and other advanced techniques to further secure
 containers within the Docker Desktop Linux VM, such that they are further
 isolated from the Docker daemon and other services running inside the VM.

-## Windows Containers
+## Windows containers
+
+> [!WARNING]
+>
+> Enabling Windows containers has important security implications.

-Unlike the Linux Docker engine and containers which run in a VM, Windows containers are an operating system feature, and run directly on the Windows host with `Administrator` privileges. For organizations who don't want their developers to run Windows containers, a `–no-windows-containers` installer flag is available from version 4.11 to disable their use.
+Unlike the Linux Docker Engine and containers which run in a VM, Windows containers are implemented using operating system features, and run directly on the Windows host. If you enable Windows containers during installation, the `ContainerAdministrator` user used for administration inside the container is a local administrator on the host machine. As a result, members of the `docker-users` group can elevate to administrators on the host. For organizations who don't want their developers to run Windows containers, a `--no-windows-containers` installer flag is available to disable their use.

 ## Networking

diff --git a/content/manuals/desktop/setup/sign-in.md b/content/manuals/desktop/setup/sign-in.md
index 72ac850af5ce..9fa2e967c988 100644
--- a/content/manuals/desktop/setup/sign-in.md
+++ b/content/manuals/desktop/setup/sign-in.md
@@ -30,9 +30,9 @@ aliases:
 - /desktop/get-started/
 ---

-Docker recommends that you authenticate using the **Sign in** option in the top-right corner of the Docker Dashboard.
+Docker recommends signing in with the **Sign in** option in the top-right corner of the Docker Dashboard.

-In large enterprises where admin access is restricted, administrators can [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md).
+In large enterprises where admin access is restricted, administrators can [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md).

 > [!TIP]
 >
@@ -40,11 +40,11 @@ In large enterprises where admin access is restricted, administrators can [enfor

 ## Benefits of signing in

-- You can access your Docker Hub repositories directly from Docker Desktop.
+- Access your Docker Hub repositories directly from Docker Desktop.

-- Authenticated users also get a higher pull rate limit compared to anonymous users. For more information, see [Usage and limits](/manuals/docker-hub/usage/_index.md).
+- Increase your pull rate limit compared to anonymous users. See [Usage and limits](/manuals/docker-hub/usage/_index.md).

-- Improve your organization’s security posture for containerized development by taking advantage of [Hardened Desktop](/manuals/security/for-admins/hardened-desktop/_index.md).
+- Enhance your organization’s security posture for containerized development with [Hardened Desktop](/manuals/enterprise/security/hardened-desktop/_index.md).
> [!NOTE]
>
@@ -52,49 +52,40 @@ In large enterprises where admin access is restricted, administrators can [enfor

 ## Signing in with Docker Desktop for Linux

-Docker Desktop for Linux relies on [`pass`](https://www.passwordstore.org/) to store credentials in gpg2-encrypted files.
+Docker Desktop for Linux relies on [`pass`](https://www.passwordstore.org/) to store credentials in GPG-encrypted files.
 Before signing in to Docker Desktop with your [Docker ID](/accounts/create-account/), you must initialize `pass`.
-Docker Desktop displays a warning if you've not initialized `pass`.
+Docker Desktop displays a warning if `pass` is not configured.

-You can initialize pass by using a gpg key. To generate a gpg key, run:
+1. Generate a GPG key. `pass` must be initialized with a GPG key, so generate one first:

-``` console
-$ gpg --generate-key
-```
+   ```console
+   $ gpg --generate-key
+   ```
+2. Enter your name and email when prompted.

-The following is an example similar to what you see once you run the previous command:
+   Once confirmed, GPG creates a key pair. Look for the `pub` line that contains your GPG ID, for example:

-```console {hl_lines=12}
-...
-GnuPG needs to construct a user ID to identify your key.
+   ```text
+   ...
+   pub   rsa3072 2022-03-31 [SC] [expires: 2024-03-30]
+         3ABCD1234EF56G78
+   uid   Molly <molly@example.com>
+   ```
+3. Copy the GPG ID and use it to initialize `pass`:

-Real name: Molly
-Email address: molly@example.com
-You selected this USER-ID:
-    "Molly <molly@example.com>"
+   ```console
+   $ pass init <your-gpg-id>
+   ```

-Change (N)ame, (E)mail, or (O)kay/(Q)uit? O
-...
-pub   rsa3072 2022-03-31 [SC] [expires: 2024-03-30]
-
-uid   Molly <molly@example.com>
-sub   rsa3072 2022-03-31 [E] [expires: 2024-03-30]
-```
-
-To initialize `pass`, run the following command using the public key generated from the previous command:
-
-```console
-$ pass init <your-gpg-id>
-```
-The following is an example similar to what you see once you run the previous command:
+   You should see output similar to:

-```console
-mkdir: created directory '/home/molly/.password-store/'
-Password store initialized for <your-gpg-id>
-```
+   ```text
+   mkdir: created directory '/home/molly/.password-store/'
+   Password store initialized for <your-gpg-id>
+   ```

 Once you initialize `pass`, you can sign in and pull your private images.
-When Docker CLI or Docker Desktop use credentials, a user prompt may pop up for the password you set during the gpg key generation.
+When the Docker CLI or Docker Desktop uses credentials, a prompt might appear asking for the password you set during GPG key generation.

 ```console
 $ docker pull molly/privateimage
@@ -109,5 +100,5 @@ docker.io/molly/privateimage:latest

 ## What's next?

 - [Explore Docker Desktop](/manuals/desktop/use-desktop/_index.md) and its features.
-- Change your Docker Desktop settings
-- [Browse common FAQs](/manuals/desktop/troubleshoot-and-support/faqs/general.md)
+- Change your [Docker Desktop settings](/manuals/desktop/settings-and-maintenance/settings.md).
+- [Browse common FAQs](/manuals/desktop/troubleshoot-and-support/faqs/general.md).
diff --git a/content/manuals/desktop/setup/vm-vdi.md b/content/manuals/desktop/setup/vm-vdi.md
index 4e315c0bac0b..89459200f3a4 100644
--- a/content/manuals/desktop/setup/vm-vdi.md
+++ b/content/manuals/desktop/setup/vm-vdi.md
@@ -2,41 +2,97 @@
 description: Instructions on how to enable nested virtualization
 keywords: nested virtualization, Docker Desktop, windows, VM, VDI environment
 title: Run Docker Desktop for Windows in a VM or VDI environment
+linkTitle: VM or VDI environments
 aliases:
 - /desktop/nested-virtualization/
 - /desktop/vm-vdi/
 weight: 30
 ---

-In general, we recommend running Docker Desktop natively on either Mac, Linux, or Windows. However, Docker Desktop for Windows can run inside a virtual desktop provided the virtual desktop is properly configured.
+Docker recommends running Docker Desktop natively on Mac, Linux, or Windows. However, Docker Desktop for Windows can run inside a virtual desktop provided the virtual desktop is properly configured.

-To run Docker Desktop in a virtual desktop environment, it is essential nested virtualization is enabled on the virtual machine that provides the virtual desktop. This is because, under the hood, Docker Desktop is using a Linux VM in which it runs Docker Engine and the containers.
+To run Docker Desktop in a virtual desktop environment, you have two options,
+depending on whether nested virtualization is supported:

-## Virtual desktop support
+- If your environment supports nested virtualization, you can run Docker Desktop
+  with its default local Linux VM.
+- If nested virtualization is not supported, Docker recommends using [Docker
+  Offload](/offload/).
+
+## Use Docker Offload
+
+Docker Offload lets you offload container workloads to a high-performance, fully
+hosted cloud environment, enabling a seamless hybrid experience.
+
+Docker Offload is useful in virtual desktop environments where nested
+virtualization isn't supported. In these environments, Docker Desktop defaults
+to using Docker Offload to ensure you can still build and run containers without
+relying on local virtualization.
+
+Docker Offload decouples the Docker Desktop client from the Docker Engine,
+allowing the Docker CLI and Docker Desktop Dashboard to interact with
+cloud-based resources as if they were local. When you run a container, Docker
+provisions a secure, isolated, and ephemeral cloud environment connected to
+Docker Desktop via an SSH tunnel. Despite running remotely, features like bind
+mounts and port forwarding continue to work seamlessly, providing a local-like
+experience.
+
+To get started using Docker Offload, see the [Docker Offload
+quickstart](/offload/quickstart/).
+
+## Virtual desktop support when using nested virtualization

 > [!NOTE]
 >
 > Support for running Docker Desktop on a virtual desktop is available to Docker Business customers, on VMware ESXi or Azure VMs only.

-The support available from Docker extends to installing and running Docker Desktop inside the VM, once the nested virtualization is set up correctly. The only hypervisors we have successfully tested are VMware ESXi and Azure, and there is no support for other VMs. For more information on Docker Desktop support, see [Get support](/manuals/desktop/troubleshoot-and-support/support.md).
+Docker support includes installing and running Docker Desktop within the VM, provided that nested virtualization is correctly enabled. The only hypervisors successfully tested are VMware ESXi and Azure, and there is no support for other VMs.
For more information on Docker Desktop support, see [Get support](/manuals/desktop/troubleshoot-and-support/support.md).

-For troubleshooting problems and intermittent failures that are outside of Docker's control, you should contact your hypervisor vendor. Each hypervisor vendor offers different levels of support. For example, Microsoft supports running nested Hyper-V both on-prem and on Azure, with some version constraints. This may not be the case for VMWare ESXi.
+For troubleshooting problems and intermittent failures that are outside of Docker's control, you should contact your hypervisor vendor. Each hypervisor vendor offers different levels of support. For example, Microsoft supports running nested Hyper-V both on-prem and on Azure, with some version constraints. This might not be the case for VMware ESXi.

-Docker does not support running multiples instances of Docker Desktop on the same machine in a VM or VDI environment.
+Docker does not support running multiple instances of Docker Desktop on the same machine in a VM or VDI environment.
+
+> [!TIP]
+>
+> If you're running Docker Desktop inside a Citrix VDI, note that Citrix can be used with a variety of underlying hypervisors, for example VMware, Hyper-V, Citrix Hypervisor/XenServer. Docker Desktop requires nested virtualization, which is not supported by Citrix Hypervisor/XenServer.
+>
+> Check with your Citrix administrator or VDI infrastructure team to confirm which hypervisor is being used, and whether nested virtualization is enabled.

 ## Turn on nested virtualization

-You must turn on nested virtualization before you install Docker Desktop on a virtual machine.
+You must turn on nested virtualization before you install Docker Desktop on a
+virtual machine that will not use Docker Offload.

 ### Turn on nested virtualization on VMware ESXi

-Nested virtualization of other hypervisors like Hyper-V inside a vSphere VM [is not a supported scenario](https://kb.vmware.com/s/article/2009916). However, running Hyper-V VM in a VMware ESXi VM is technically possible and, depending on the version, ESXi includes hardware-assisted virtualization as a supported feature. For internal testing, we used a VM that had 1 CPU with 4 cores and 12GB of memory.
+Nested virtualization of other hypervisors like Hyper-V inside a vSphere VM [is not a supported scenario](https://kb.vmware.com/s/article/2009916). However, running a Hyper-V VM in a VMware ESXi VM is technically possible and, depending on the version, ESXi includes hardware-assisted virtualization as a supported feature. A VM with 1 CPU, 4 cores, and 12 GB of memory was used for internal testing.

 For steps on how to expose hardware-assisted virtualization to the guest OS, [see VMware's documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-2A98801C-68E8-47AF-99ED-00C63E4857F6.html).

-
 ### Turn on nested virtualization on an Azure Virtual Machine

 Nested virtualization is supported by Microsoft for running Hyper-V inside an Azure VM.

-For Azure virtual machines, [check that the VM size chosen supports nested virtualization](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes). Microsoft provides [a helpful list on Azure VM sizes](https://docs.microsoft.com/en-us/azure/virtual-machines/acu) and highlights the sizes that currently support nested virtualization. For internal testing, we used D4s_v5 machines. We recommend this specification or above for optimal performance of Docker Desktop.
+For Azure virtual machines, [check that the VM size chosen supports nested virtualization](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes). Microsoft provides [a helpful list on Azure VM sizes](https://docs.microsoft.com/en-us/azure/virtual-machines/acu) and highlights the sizes that currently support nested virtualization. D4s_v5 machines were used for internal testing. Use this specification or above for optimal performance of Docker Desktop.
+
+## Docker Desktop support on Nutanix-powered VDI
+
+Docker Desktop can be used within Nutanix-powered VDI environments provided that the underlying Windows environment supports WSL 2 or Windows container mode. Since Nutanix officially supports WSL 2, Docker Desktop should function as expected, as long as WSL 2 operates correctly within the VDI environment.
+
+If you use Windows container mode, confirm that the Nutanix environment supports Hyper-V or alternative Windows container backends.
+
+### Supported configurations
+
+Docker Desktop follows the VDI support definitions outlined [previously](#virtual-desktop-support-when-using-nested-virtualization):
+
+ - Persistent VDI environments (supported): You receive the same virtual desktop instance across sessions, preserving installed software and configurations.
+
+ - Non-persistent VDI environments (not supported): Docker Desktop does not support environments where the OS resets between sessions, requiring re-installation or reconfiguration each time.
+
+### Support scope and responsibilities
+
+For WSL 2-related issues, contact Nutanix support. For Docker Desktop-specific issues, contact Docker support.
+
+## Additional resources
+
+- [Docker Desktop on Microsoft Dev Box](/manuals/enterprise/enterprise-deployment/dev-box.md)
\ No newline at end of file
diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/_index.md b/content/manuals/desktop/troubleshoot-and-support/faqs/_index.md
index 026ecb568559..3a881d91a410 100644
--- a/content/manuals/desktop/troubleshoot-and-support/faqs/_index.md
+++ b/content/manuals/desktop/troubleshoot-and-support/faqs/_index.md
@@ -3,6 +3,4 @@ build:
   render: never
 title: FAQs
 weight: 30
-aliases:
-  - /desktop/faqs/
---
diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/general.md b/content/manuals/desktop/troubleshoot-and-support/faqs/general.md
index 2a48392d84ed..13c950c0ba52 100644
--- a/content/manuals/desktop/troubleshoot-and-support/faqs/general.md
+++ b/content/manuals/desktop/troubleshoot-and-support/faqs/general.md
@@ -9,6 +9,7 @@ aliases:
- /docker-for-mac/faqs/
- /docker-for-windows/faqs/
- /desktop/faqs/
+- /desktop/faqs/general/
weight: 10
---

@@ -21,16 +22,16 @@ This includes:

- The resources in the [Learning Center](/manuals/desktop/use-desktop/_index.md)
- Pulling or pushing an image to Docker Hub
-- [Image Access Management](/manuals/security/for-developers/access-tokens.md)
+- [Image Access Management](/manuals/security/access-tokens.md)
- [Static vulnerability scanning](/manuals/docker-hub/repos/manage/vulnerability-scanning.md)
- Viewing remote images in the Docker Dashboard
-- Setting up [Dev Environments](/manuals/desktop/features/dev-environments/_index.md)
- Docker Build when using [BuildKit](/manuals/build/buildkit/_index.md#getting-started).
 You can work around this by disabling BuildKit. Run `DOCKER_BUILDKIT=0 docker build .` to disable BuildKit.
- [Kubernetes](/manuals/desktop/features/kubernetes.md) (Images are downloaded when you enable Kubernetes for the first time)
- Checking for updates
- [In-app diagnostics](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose-from-the-app) (including the [Self-diagnose tool](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose-from-the-app))
- Sending usage statistics
+- When `networkMode` is set to `mirrored`

### How do I connect to the remote Docker Engine API?

@@ -56,7 +57,7 @@ For details, see [Docker Engine API](/reference/api/engine/_index.md).
### How do I connect from a container to a service on the host?

The host has a changing IP address, or none if you have no network access.
-We recommend that you connect to the special DNS name `host.docker.internal`,
+It is recommended that you connect to the special DNS name `host.docker.internal`,
which resolves to the internal IP address used by the host.

For more information and examples, see [how to connect from a container to a service on the host](/manuals/desktop/features/networking.md#i-want-to-connect-from-a-container-to-a-service-on-the-host).

diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md
index 0cd0dbc98566..c9b33869772c 100644
--- a/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md
+++ b/content/manuals/desktop/troubleshoot-and-support/faqs/linuxfaqs.md
@@ -6,6 +6,7 @@ linkTitle: Linux
tags: [FAQ]
aliases:
- /desktop/linux/space/
+- /desktop/faqs/linuxfaqs/
weight: 40
---

@@ -54,11 +55,11 @@ and `/etc/subgid` (see `subgid(5)`) must be present. Docker Desktop only supports
subordinate ID delegation configured via files. Docker Desktop maps the current
user ID and GID to 0 in the containers. It uses the first entry corresponding to
the current user in `/etc/subuid` and `/etc/subgid` to set up
-mappings for IDs above 0 in the containers.
+mappings for IDs greater than 0 in the containers.

| ID in container | ID on host |
| --------------- | -------------------------------------------------------------------------------- |
-| 0 (root) | ID of the user running DD (e.g. 1000) |
+| 0 (root) | ID of the user running Docker Desktop (e.g. 1000) |
| 1 | 0 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 100000) |
| 2 | 1 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 100001) |
| 3 | 2 + beginning of ID range specified in `/etc/subuid`/`/etc/subgid` (e.g. 100002) |
@@ -120,7 +121,7 @@ To move the disk image file to a different location:

2. In the **Disk image location** section, select **Browse** and choose a new location for the disk image.

-3. Select **Apply & Restart** for the changes to take effect.
+3. Select **Apply** for the changes to take effect.

Do not move the file directly in Finder as this can cause Docker Desktop to lose track of the file.

@@ -182,6 +183,6 @@ To reduce the maximum size of the disk image file:

2. The **Disk image size** section contains a slider that allows you to change the maximum size of the disk image. Adjust the slider to set a lower limit.

-3. Select **Apply & Restart**.
+3. Select **Apply**.

When you reduce the maximum size, the current disk image file is deleted, and therefore, all containers and images are lost.
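+
+For example, to see how much space the disk image actually uses compared to its maximum size, you can inspect the file directly. A minimal sketch, assuming the default Docker Desktop for Linux location (`~/.docker/desktop/vms/0/data/Docker.raw`); the output shown is illustrative:
+
+```console
+$ cd ~/.docker/desktop/vms/0/data
+$ ls -slh Docker.raw
+2.3G -rw-r--r-- 1 molly molly 64G Mar 31 10:00 Docker.raw
+```
+
+The first column is the space the sparse file actually allocates on disk, while the size field shows the maximum it can grow to.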
diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md
index 84b109f4a247..f9fea6020fa5 100644
--- a/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md
+++ b/content/manuals/desktop/troubleshoot-and-support/faqs/macfaqs.md
@@ -7,37 +7,20 @@ tags: [FAQ]
aliases:
- /desktop/mac/space/
- /docker-for-mac/space/
+- /desktop/faqs/macfaqs/
weight: 20
---

-### Why do I keep getting a notification telling me an application has changed my Desktop configurations?
-
-You receive this notification because the Configuration integrity check feature has detected that a third-party application has altered your Docker Desktop configuration. This usually happens due to incorrect or missing symlinks. The notification ensures you are aware of these changes so you can review and repair any potential issues to maintain system reliability.
-
-Opening the notification presents a pop-up window which provides detailed information about the detected integrity issues.
-
-If you choose to ignore the notification, it will be shown again only at the next Docker Desktop startup. If you choose to repair your configuration, you won't be prompted again.
-
-If you want to switch off Configuration integrity check notifications, navigate to Docker Desktop's settings and in the **General** tab, clear the **Automatically check configuration** setting.
-
-If you have feedback on how to further improve the Configuration integrity check feature, [fill out the feedback form](https://docs.google.com/forms/d/e/1FAIpQLSeD_Odqc__4ihRXDtH_ba52QJuaKZ00qGnNa_tM72MmH32CZw/viewform).
-
### What is HyperKit?

HyperKit is a hypervisor built on top of the Hypervisor.framework in macOS. It runs entirely in userspace and has no other dependencies.

-We use HyperKit to eliminate the need for other VM products, such as Oracle
-VirtualBox or VMWare Fusion.
+Docker uses HyperKit to eliminate the need for other VM products, such as Oracle
+VirtualBox or VMware Fusion.

### What is the benefit of HyperKit?

-HyperKit is thinner than VirtualBox and VMWare fusion, and the version included is customized for Docker workloads on Mac.
-
-### Why is com.docker.vmnetd still running after I quit the app?
-
-The privileged helper process `com.docker.vmnetd` is started by `launchd` and
-runs in the background. The process does not consume any resources unless
-`Docker.app` connects to it, so it's safe to ignore.
+HyperKit is thinner than VirtualBox and VMware Fusion, and the version included is customized for Docker workloads on Mac.

### Where does Docker Desktop store Linux containers and images?

@@ -65,7 +48,7 @@ To move the disk image file to a different location:

2. In the **Disk image location** section, select **Browse** and choose a new location for the disk image.

-3. Select **Apply & Restart** for the changes to take effect.
+3. Select **Apply** for the changes to take effect.

> [!IMPORTANT]
>
@@ -129,7 +112,7 @@ To reduce the maximum size of the disk image file:

2. The **Disk image size** section contains a slider that allows you to change the maximum size of the disk image. Adjust the slider to set a lower limit.

-3. Select **Apply & Restart**.
+3. Select **Apply**.

When you reduce the maximum size, the current disk image file is deleted, and therefore, all containers and images are lost.
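+
+To check the same thing on a Mac, list the allocated size of the disk image. A minimal sketch, assuming the default location under `~/Library/Containers/com.docker.docker/Data/vms/0/data`:
+
+```console
+$ cd ~/Library/Containers/com.docker.docker/Data/vms/0/data
+$ ls -klsh Docker.raw
+```
+
+With BSD `ls`, the leading number is the space the file actually uses (in kilobytes with `-k`), while the size field shows the maximum it can grow to.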
diff --git a/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md b/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md index 949fc9574358..ea1da99fc25f 100644 --- a/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md +++ b/content/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md @@ -19,18 +19,6 @@ Docker Desktop uses the Windows Hyper-V features. While older Windows versions h No, running Docker Desktop on Windows Server is not supported. -### Can I change permissions on shared volumes for container-specific deployment requirements? - -Docker Desktop does not enable you to control (`chmod`) -the Unix-style permissions on [shared volumes](/manuals/desktop/settings-and-maintenance/settings.md#file-sharing) for -deployed containers, but rather sets permissions to a default value of -[0777](https://chmodcommand.com/chmod-0777/) -(`read`, `write`, `execute` permissions for `user` and for -`group`) which is not configurable. - -For workarounds and to learn more, see -[Permissions errors on data directories for shared volumes](/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md#permissions-errors-on-data-directories-for-shared-volumes). - ### How do symlinks work on Windows? Docker Desktop supports two types of symlinks: Windows native symlinks and symlinks created inside a container. diff --git a/content/manuals/desktop/troubleshoot-and-support/feedback.md b/content/manuals/desktop/troubleshoot-and-support/feedback.md index 513ef1e307e4..ba7759f24a96 100644 --- a/content/manuals/desktop/troubleshoot-and-support/feedback.md +++ b/content/manuals/desktop/troubleshoot-and-support/feedback.md @@ -4,25 +4,22 @@ keywords: Feedback, Docker Desktop, Linux, Mac, Windows, Dev Environments, Exten Community forum, bugs, problems, issues title: Give feedback weight: 40 +aliases: + - /desktop/feedback/ --- There are many ways you can provide feedback on Docker Desktop or Docker Desktop features. ### In-product feedback -On each Docker Desktop Dashboard view, there is a **Give feedback** link. This sends you to a Google feedback form where you can share your feedback and ideas. - -You can also use the `docker feedback` command to submit feedback directly from the command line. - - +On each Docker Desktop Dashboard view, there is a **Give feedback** link. This opens a feedback form where you can share ideas directly with the Docker team. 
### Feedback via Docker Community forums

To get help from the community, review current user topics, join or start a discussion, or sign in to the appropriate Docker forums:

-- [Docker Desktop for Mac
-forum](https://forums.docker.com/c/docker-for-mac)
+- [Docker Desktop for Mac forum](https://forums.docker.com/c/docker-for-mac)
- [Docker Desktop for Windows forum](https://forums.docker.com/c/docker-for-windows)
- [Docker Desktop for Linux forum](https://forums.docker.com/c/docker-desktop-for-linux/60)

@@ -34,7 +31,6 @@ GitHub](https://github.com/docker/for-mac/issues)
- [Docker Desktop for Windows issues on GitHub](https://github.com/docker/for-win/issues)
- [Docker Desktop for Linux issues on GitHub](https://github.com/docker/desktop-linux/issues)
-- [Dev Environments issues on Github](https://github.com/docker/dev-environments/issues)
- [Docker Extensions issues on GitHub](https://github.com/docker/extensions-sdk/issues)

### Feedback via Community Slack channels

You can also provide feedback through the following [Docker Community Slack](http://dockr.ly/comm-slack) channels:

- #docker-desktop-mac
- #docker-desktop-windows
- #docker-desktop-linux
-- #docker-dev-environments
- #extensions
diff --git a/content/manuals/desktop/troubleshoot-and-support/support.md b/content/manuals/desktop/troubleshoot-and-support/support.md
index 6d53c2bf9285..1608d0577aa2 100644
--- a/content/manuals/desktop/troubleshoot-and-support/support.md
+++ b/content/manuals/desktop/troubleshoot-and-support/support.md
@@ -1,7 +1,7 @@
---
description: See what support is available for Docker Desktop
keywords: Support, Docker Desktop, Linux, Mac, Windows
-title: Get support
+title: Get support for Docker Desktop
weight: 20
aliases:
- /desktop/support/
@@ -14,58 +14,63 @@ aliases:

### How do I get Docker Desktop support?

+> [!TIP]
+>
+> Before reaching out for support, follow the appropriate [Diagnose steps](/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md#diagnose) in the troubleshooting documentation.
+
If you have a paid Docker subscription, you can [contact the Support team](https://hub.docker.com/support/contact/).

All Docker users can seek support through the following resources, where Docker or the community respond on a best effort basis.

-  - [Docker Desktop for Windows GitHub repo](https://github.com/docker/for-win)
-  - [Docker Desktop for Mac GitHub repo](https://github.com/docker/for-mac)
-  - [Docker Desktop for Linux GitHub repo](https://github.com/docker/desktop-linux)
-  - [Docker Community Forums](https://forums.docker.com/)
-  - [Docker Community Slack](http://dockr.ly/comm-slack)
+- [Docker Desktop for Windows GitHub repo](https://github.com/docker/for-win)
+- [Docker Desktop for Mac GitHub repo](https://github.com/docker/for-mac)
+- [Docker Desktop for Linux GitHub repo](https://github.com/docker/desktop-linux)
+- [Docker Community Forums](https://forums.docker.com/)
+- [Docker Community Slack](http://dockr.ly/comm-slack)
+
### What support can I get?
-* Account management related issues - * Automated builds - * Basic product 'how to' questions - * Billing or subscription issues - * Configuration issues - * Desktop installation issues - * Installation crashes - * Failure to launch Docker Desktop on first run - * Desktop update issues - * Sign-in issues in both the command line interface and Docker Hub user interface - * Push or pull issues, including rate limiting - * Usage issues - * Crash closing software - * Docker Desktop not behaving as expected - - For Windows users, you can also request support on: - * Turning on virtualization in BIOS - * Turning on Windows features - * Running inside [certain VM or VDI environments](/manuals/desktop/setup/vm-vdi.md) (Docker Business customers only) +- Account management related issues +- Automated builds +- Basic product 'how to' questions +- Billing or subscription issues +- Configuration issues +- Desktop installation issues + - Installation crashes + - Failure to launch Docker Desktop on first run +- Desktop update issues +- Sign-in issues in both the command line interface and Docker Hub user interface +- Push or pull issues, including rate limiting +- Usage issues + - Crash closing software + - Docker Desktop not behaving as expected + +For Windows users, you can also request support on: +- Turning on virtualization in BIOS +- Turning on Windows features +- Running inside [certain VM or VDI environments](/manuals/desktop/setup/vm-vdi.md) (Docker Business customers only) ### What is not supported? Docker Desktop excludes support for the following types of issues: -* Use on or in conjunction with hardware or software other than that specified in the applicable documentation -* Running on unsupported operating systems, including beta/preview versions of operating systems -* Running containers of a different architecture using emulation -* Support for the Docker engine, Docker CLI, or other bundled Linux components -* Support for Kubernetes -* Features labeled as experimental -* System/Server administration activities -* Supporting Desktop as a production runtime -* Scale deployment/multi-machine installation of Desktop -* Routine product maintenance (data backup, cleaning disk space and configuring log rotation) -* Third-party applications not provided by Docker -* Altered or modified Docker software -* Defects in the Docker software due to hardware malfunction, abuse, or improper use -* Any version of the Docker software other than the latest version -* Reimbursing and expenses spent for third-party services not provided by Docker -* Docker support excludes training, customization, and integration -* Running multiple instances of Docker Desktop on a single machine +- Use on or in conjunction with hardware or software other than that specified in the applicable documentation +- Running on unsupported operating systems, including beta/preview versions of operating systems +- Running containers of a different architecture using emulation +- Support for Docker Engine, Docker CLI, or other bundled Linux components +- Support for Kubernetes +- Features labeled as experimental +- System/Server administration activities +- Supporting Desktop as a production runtime +- Scale deployment/multi-machine installation of Desktop +- Routine product maintenance (data backup, cleaning disk space and configuring log rotation) +- Third-party applications not provided by Docker +- Altered or modified Docker software +- Defects in the Docker software due to hardware malfunction, abuse, or improper use +- Any 
version of the Docker software other than the latest version
+- Reimbursement of expenses for third-party services not provided by Docker
+- Docker support excludes training, customization, and integration
+- Running multiple instances of Docker Desktop on a single machine

> [!NOTE]
>
@@ -80,17 +85,17 @@ For Pro and Team customers, Docker only offers support for the latest version of

### How many machines can I get support for Docker Desktop on?

As a Pro user you can get support for Docker Desktop on a single machine.
-As a Team, you can get support for Docker Desktop for the number of machines equal to the number of seats as part of your plan.
+As a Team, you can get support for Docker Desktop for the number of machines equal to the number of seats as part of your subscription.

### What OS’s are supported?

Docker Desktop is available for Mac, Linux, and Windows. The supported version information can be found on the following pages:

-* [Mac system requirements](/manuals/desktop/setup/install/mac-install.md#system-requirements)
-* [Windows system requirements](/manuals/desktop/setup/install/windows-install.md#system-requirements)
-* [Linux system requirements](/manuals/desktop/setup/install/linux/_index.md#system-requirements)
+- [Mac system requirements](/manuals/desktop/setup/install/mac-install.md#system-requirements)
+- [Windows system requirements](/manuals/desktop/setup/install/windows-install.md#system-requirements)
+- [Linux system requirements](/manuals/desktop/setup/install/linux/_index.md#system-requirements)

-### How is personal diagnostic data handled in Docker Desktop?
+### How is personal diagnostic data handled in Docker Desktop when I'm getting support?

When uploading diagnostics to help Docker with investigating issues, the uploaded diagnostics bundle may contain personal data such as usernames and IP addresses. The diagnostics bundles are only accessible to Docker, Inc. employees who are directly involved in diagnosing Docker Desktop issues.

diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md
index 8e127ce261d2..88aea4c1c81d 100644
--- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md
+++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/_index.md
@@ -14,7 +14,7 @@ aliases:
  - /windows/troubleshoot/
  - /docker-for-win/troubleshoot/
  - /docker-for-windows/troubleshoot/
-  - /desktop/troubleshoot/overview/
+  - /desktop/troubleshoot/overview/
  - /desktop/troubleshoot/
tags: [ Troubleshooting ]
weight: 10
@@ -47,10 +47,6 @@ Docker Desktop to their initial state, the same as when Docker Desktop was first

If you are a Mac or Linux user, you also have the option to **Uninstall** Docker Desktop from your system.

-> [!TIP]
->
-> If you need to contact support, select the **Question mark** icon near the top-right corner of Docker Dashboard, and then select **Contact support**. Users with a paid Docker subscription can use this option to send a support request.
-
## Diagnose

> [!TIP]
>
@@ -193,58 +189,9 @@ If you don't have a paid Docker subscription, create an issue on GitHub:

### Self-diagnose tool

-Docker Desktop contains a self-diagnose tool which can help you identify some common problems.
-
-{{< tabs group="os" >}}
-{{< tab name="Windows" >}}
-1. Locate the `com.docker.diagnose` tool.
-
-   ```console
-   $ C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe
-   ```
-
-2.
In PowerShell, run the self-diagnose tool:
-
-   ```console
-   $ & "C:\Program Files\Docker\Docker\resources\com.docker.diagnose.exe" check
-   ```
-
-{{< /tab >}}
-{{< tab name="Mac" >}}
-
-1. Locate the `com.docker.diagnose` tool.
-
-   ```console
-   $ /Applications/Docker.app/Contents/MacOS/com.docker.diagnose
-   ```
-
-2. Run the self-diagnose tool:
-
-   ```console
-   $ /Applications/Docker.app/Contents/MacOS/com.docker.diagnose check
-   ```
-
-{{< /tab >}}
-{{< tab name="Linux" >}}
-
-1. Locate the `com.docker.diagnose` tool.
-
-2. Run the self-diagnose tool:
-
-   ```console
-   $ /opt/docker-desktop/bin/com.docker.diagnose check
-   ```
-
-{{< /tab >}}
-{{< /tabs >}}
-
-The tool runs a suite of checks and displays **PASS** or **FAIL** next to each check. If there are any failures, it highlights the most relevant at the end of the report.
-
-You can then create an issue on GitHub:
-
-- [For Linux](https://github.com/docker/desktop-linux/issues)
-- [For Mac](https://github.com/docker/for-mac/issues)
-- [For Windows](https://github.com/docker/for-win/issues)
+> [!IMPORTANT]
+>
+> This tool has been deprecated.

## Check the logs

@@ -317,5 +264,4 @@ to learn how to view the Docker Daemon logs.

## Further resources

- View specific [troubleshoot topics](topics.md).
-- Implement [workarounds for common problems](workarounds.md)
- View information on [known issues](known-issues.md)
diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md
index 6cef69292412..d4d6afc5638c 100644
--- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md
+++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/known-issues.md
@@ -9,20 +9,13 @@ aliases:
---

{{< tabs >}}
-{{< tab name="For all platforms" >}}
-* IPv6 is not yet supported on Docker Desktop.
-{{< /tab >}}
{{< tab name="For Mac with Intel chip" >}}
-* The Mac Activity Monitor reports that Docker is using twice the amount of memory it's actually using. This is due to a bug in MacOS. We have written [a detailed report](https://docs.google.com/document/d/17ZiQC1Tp9iH320K-uqVLyiJmk4DHJ3c4zgQetJiKYQM/edit?usp=sharing) on this.
-
-* Force-ejecting the `.dmg` after running `Docker.app` from it can cause the
-  whale icon to become unresponsive, Docker tasks to show as not responding in
-  the Activity Monitor, and for some processes to consume a large amount of CPU
-  resources. Reboot and restart Docker to resolve these issues.
+- The Mac Activity Monitor reports that Docker is using twice the amount of memory it's actually using. This is due to a [bug in macOS](https://docs.google.com/document/d/17ZiQC1Tp9iH320K-uqVLyiJmk4DHJ3c4zgQetJiKYQM/edit?usp=sharing).

-* Docker doesn't auto-start after sign in even when it's enabled in **Settings**. This is related to a set of issues with Docker helper, registration, and versioning.
+- Force-ejecting the `.dmg` after running `Docker.app` from it can cause the
  whale icon to become unresponsive, Docker tasks to show as not responding in the Activity Monitor, and for some processes to consume a large amount of CPU resources. Reboot and restart Docker to resolve these issues.

-* Docker Desktop uses the `HyperKit` hypervisor
+- Docker Desktop uses the `HyperKit` hypervisor
  (https://github.com/docker/hyperkit) in macOS 10.10 Yosemite and higher.
If you are developing with tools that have conflicts with `HyperKit`, such as [Intel Hardware Accelerated Execution Manager @@ -32,7 +25,7 @@ aliases: This allows you to continue work with the other tools and prevent `HyperKit` from interfering. -* If you are working with applications like [Apache +- If you are working with applications like [Apache Maven](https://maven.apache.org/) that expect settings for `DOCKER_HOST` and `DOCKER_CERT_PATH` environment variables, specify these to connect to Docker instances through Unix sockets. For example: @@ -41,39 +34,18 @@ aliases: $ export DOCKER_HOST=unix:///var/run/docker.sock ``` -* There are a number of issues with the performance of directories bind-mounted - into containers. In particular, writes of small blocks, and traversals of large - directories are currently slow. Additionally, containers that perform large - numbers of directory operations, such as repeated scans of large directory - trees, may suffer from poor performance. Applications that behave in this way - include: - - - `rake` - - `ember build` - - Symfony - - Magento - - Zend Framework - - PHP applications that use [Composer](https://getcomposer.org) to install - dependencies in a `vendor` folder - - As a workaround for this behavior, you can put vendor or third-party library - directories in Docker volumes, perform temporary file system operations - outside of bind mounts, and use third-party tools like Unison or `rsync` to - synchronize between container directories and bind-mounted directories. We are - actively working on performance improvements using a number of different - techniques. To learn more, see the [topic on our roadmap](https://github.com/docker/roadmap/issues/7). {{< /tab >}} {{< tab name="For Mac with Apple silicon" >}} -- On Apple silicon in native `arm64` containers, older versions of `libssl` such as `debian:buster`, `ubuntu:20.04`, and `centos:8` will segfault when connected to some TLS servers, for example, `curl https://dl.yarnpkg.com`. The bug is fixed in newer versions of `libssl` in `debian:bullseye`, `ubuntu:21.04`, and `fedora:35`. + - Some command line tools do not work when Rosetta 2 is not installed. - The old version 1.x of `docker-compose`. Use Compose V2 instead - type `docker compose`. - The `docker-credential-ecr-login` credential helper. - Some images do not support the ARM64 architecture. You can add `--platform linux/amd64` to run (or build) an Intel image using emulation. - However, attempts to run Intel-based containers on Apple silicon machines under emulation can crash as qemu sometimes fails to run the container. In addition, filesystem change notification APIs (`inotify`) do not work under qemu emulation. Even when the containers do run correctly under emulation, they will be slower and use more memory than the native equivalent. + However, attempts to run Intel-based containers on Apple silicon machines under emulation can crash as QEMU sometimes fails to run the container. In addition, filesystem change notification APIs (`inotify`) do not work under QEMU emulation. Even when the containers do run correctly under emulation, they will be slower and use more memory than the native equivalent. - In summary, running Intel-based containers on Arm-based machines should be regarded as "best effort" only. We recommend running arm64 containers on Apple silicon machines whenever possible, and encouraging container authors to produce arm64, or multi-arch, versions of their containers. 
This issue should become less common over time, as more and more images are rebuilt [supporting multiple architectures](https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/).
-- `ping` from inside a container to the Internet does not work as expected. To test the network, use `curl` or `wget`. See [docker/for-mac#5322](https://github.com/docker/for-mac/issues/5322#issuecomment-809392861).
+  In summary, running Intel-based containers on Arm-based machines should be regarded as "best effort" only. We recommend running `arm64` containers on Apple silicon machines whenever possible, and encouraging container authors to produce `arm64`, or multi-arch, versions of their containers. This issue should become less common over time, as more and more images are rebuilt [supporting multiple architectures](https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/).

- Users may occasionally experience data drop when a TCP stream is half-closed.
+
{{< /tab >}}
{{< /tabs >}}
diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md
index b4ba08920dc1..17d946d1a8e1 100644
--- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md
+++ b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/topics.md
@@ -3,11 +3,12 @@ description: Explore common troubleshooting topics for Docker Desktop
keywords: Linux, Mac, Windows, troubleshooting, topics, Docker Desktop
title: Troubleshoot topics for Docker Desktop
linkTitle: Common topics
-toc_max: 4
+toc_max: 3
tags: [ Troubleshooting ]
weight: 10
aliases:
 - /desktop/troubleshoot/topics/
+ - /manuals/desktop/troubleshoot-and-support/troubleshoot/workarounds/
---

> [!TIP]
@@ -20,57 +21,114 @@ aliases:

## Topics for all platforms

-### Make sure certificates are set up correctly
+### Certificates not set up correctly

-Docker Desktop ignores certificates listed under insecure registries, and
-does not send client certificates to them. Commands like `docker run` that
-attempt to pull from the registry produces error messages on the command line,
-like this:
+#### Error message
+
+When attempting to pull from a registry using `docker run`, you might encounter the following error:

```console
Error response from daemon: Get http://192.168.203.139:5858/v2/: malformed HTTP response "\x15\x03\x01\x00\x02\x02"
```

-As well as on the registry. For example:
+Additionally, logs from the registry might show:

```console
2017/06/20 18:15:30 http: TLS handshake error from 192.168.203.139:52882: tls: client didn't provide a certificate
2017/06/20 18:15:30 http: TLS handshake error from 192.168.203.139:52883: tls: first record does not look like a TLS handshake
```

+#### Possible causes
+
+- Docker Desktop ignores certificates listed under insecure registries.
+- Client certificates are not sent to insecure registries, causing handshake failures.
+
+#### Solution
+
+- Ensure that your registry is properly configured with valid SSL certificates.
+- If your registry uses a self-signed certificate, configure Docker to trust the certificate by adding it to Docker’s certificates directory (`/etc/docker/certs.d/` on Linux).
+- If the issue persists, check your Docker daemon configuration and enable TLS authentication.
+
### Docker Desktop's UI appears green, distorted, or has visual artifacts

-Docker Desktop uses hardware-accelerated graphics by default, which may cause problems for some GPUs.
In such cases,
-Docker Desktop will launch successfully, but some screens may appear green, distorted,
-or have some visual artifacts.
+#### Cause

-To work around this issue, disable hardware acceleration by creating a `"disableHardwareAcceleration": true` entry in Docker Desktop's `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier). You can find this file at:
+Docker Desktop uses hardware-accelerated graphics by default, which may cause problems for some GPUs.

-- Mac: `~/Library/Group Containers/group.com.docker/settings-store.json`
-- Windows: `C:\Users\[USERNAME]\AppData\Roaming\Docker\settings-store.json`
-- Linux: `~/.docker/desktop/settings-store.json.`
+#### Solution

-After updating the `settings-store.json` file, close and restart Docker Desktop to apply the changes.
+Disable hardware acceleration:

-## Topics for Linux and Mac
+1. Edit Docker Desktop's `settings-store.json` file (or `settings.json` for Docker Desktop versions 4.34 and earlier). You can find this file at:
+
+   - Mac: `~/Library/Group Containers/group.com.docker/settings-store.json`
+   - Windows: `C:\Users\[USERNAME]\AppData\Roaming\Docker\settings-store.json`
+   - Linux: `~/.docker/desktop/settings-store.json`
+
+2. Add the following entry:
+
+   ```json
+   "disableHardwareAcceleration": true
+   ```
+
+3. Save the file and restart Docker Desktop.
+
+### Using mounted volumes and getting runtime errors indicating an application file is not found, access to a volume mount is denied, or a service cannot start
+
+#### Cause
+
+If your project directory is located outside your home directory (`/home/<user>`), Docker Desktop requires file sharing permissions to access it.
+
+#### Solution
+
+Enable file sharing in Docker Desktop for Mac and Linux:
+
+1. Navigate to **Settings**, select **Resources** and then **File sharing**.
+2. Add the drive or folder that contains the Dockerfile and volume mount paths.
+
+Enable file sharing in Docker Desktop for Windows:
+
+1. From **Settings**, select **Shared Folders**.
+2. Share the folder that contains the Dockerfile and volume mount paths.
+
+### `port already allocated` errors
+
+#### Error message
+
+When starting a container, you might see an error like:
+
+```text
+Bind for 0.0.0.0:8080 failed: port is already allocated
+```
+
+Or
+
+```text
+listen tcp:0.0.0.0:8080: bind: address is already in use
+```
+
+#### Cause
+
+- Another application on your system is already using the specified port.
+- A previously running container was not stopped properly and is still bound to the port.
-### Volume mounting requires file sharing for any project directories outside of `$HOME`

+#### Solution

-If you are using mounted volumes and get runtime errors indicating an
-application file is not found, access to a volume mount is denied, or a service
-cannot start, such as when using [Docker Compose](/manuals/compose/gettingstarted.md),
-you might need to turn on [file sharing](/manuals/desktop/settings-and-maintenance/settings.md#file-sharing).
+To identify which process is using the port, either:
+- Use the `resmon.exe` GUI, select **Network** and then **Listening Ports**.
+- In PowerShell, use `netstat -aon | find /i "listening "` to discover the PID of the process
currently using the port (the PID is the number in the rightmost column).

-Volume mounting requires shared drives for projects that live outside of the
-`/home/<user>` directory. From **Settings**, select **Resources** and then **File sharing**. Share the drive that contains the Dockerfile and volume.
-### Docker Desktop fails to start on MacOS or Linux platforms

## Topics for Linux and Mac

-On MacOS and Linux, Docker Desktop creates Unix domain sockets used for inter-process communication.

### Docker Desktop fails to start on Mac or Linux platforms

-Docker fails to start if the absolute path length of any of these sockets exceeds the OS limitation which is 104 characters on MacOS and 108 characters on Linux. These sockets are created under the user's home directory. If the user ID length is such that the absolute path of the socket exceeds the OS path length limitation, then Docker Desktop is unable to create the socket and fails to start. The workaround for this is to shorten the user ID which we recommend has a maximum length of 33 characters on MacOS and 55 characters on Linux.
+#### Error message

-Following are the examples of errors on MacOS which indicate that the startup failure was due to exceeding the above mentioned OS limitation:
+Docker fails to start due to Unix domain socket path length limitations:

```console
[vpnkit-bridge][F] listen unix /Library/Containers/com.docker.docker/Data/http-proxy-control.sock: bind: invalid argument
@@ -80,32 +138,83 @@ Following are the examples of errors on MacOS which indicate that the startup fa
[com.docker.backend][E] listen(vsock:4099) failed: listen unix /Library/Containers/com.docker.docker/Data/vms/0/00000002.00001003: bind: invalid argument
```

+#### Cause
+
+On Mac and Linux, Docker Desktop creates Unix domain sockets used for inter-process communication. These sockets are created under the user's home directory.
+
+Unix domain sockets have a maximum path length:
+
+- 104 characters on Mac
+- 108 characters on Linux
+
+If your home directory path is too long, Docker Desktop fails to create necessary sockets.
+
+#### Solution
+
+Ensure your username is short enough to keep paths within the allowed limit:
+
+- Mac: usernames must be 33 characters or fewer
+- Linux: usernames must be 55 characters or fewer
+
## Topics for Mac

-### Incompatible CPU detected
+### Upgrade requires administrator privileges

-> [!TIP]
+#### Cause
+
+On macOS, users without administrator privileges cannot perform in-app upgrades from the Docker Desktop Dashboard.
+
+#### Solution
+
+> [!IMPORTANT]
 >
-> If you are seeing this error, check you've installed the correct Docker Desktop for your architecture.
+> Do not uninstall the current version before upgrading. Doing so deletes all local Docker containers, images, and volumes.
+
+To upgrade Docker Desktop:
+
+- Ask an administrator to install the newer version over the existing one.
+- Use the [`--user` install flag](/manuals/desktop/setup/install/mac-install.md#security-and-access) if appropriate for your setup.
+
+### Persistent notification telling me an application has changed my Desktop configurations
+
+#### Cause
+
+You receive this notification because the Configuration integrity check feature has detected that a third-party application has altered your Docker Desktop configuration. This usually happens due to incorrect or missing symlinks. The notification ensures you are aware of these changes so you can review and repair any potential issues to maintain system reliability.
+
+Opening the notification presents a pop-up window that provides detailed information about the detected integrity issues.
+
+#### Solution
+
+If you choose to ignore the notification, it is shown again only at the next Docker Desktop startup. If you choose to repair your configuration, you won't be prompted again.
+
+If you want to switch off Configuration integrity check notifications, navigate to Docker Desktop's settings and in the **General** tab, clear the **Automatically check configuration** setting.
+
+### `com.docker.vmnetd` is still running after I quit the app
+
+The privileged helper process `com.docker.vmnetd` is started by `launchd` and
+runs in the background. The process does not consume any resources unless
+`Docker.app` connects to it, so it's safe to ignore.
+
+### Incompatible CPU detected
+
+#### Cause

Docker Desktop requires a processor (CPU) that supports virtualization and, more
specifically, the [Apple Hypervisor
framework](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/).
-Docker Desktop is only compatible with Mac systems that have a CPU that supports the Hypervisor framework. Most Macs built in 2010 and later support it, as described in the Apple Hypervisor Framework documentation about supported hardware:
-*Generally, machines with an Intel VT-x feature set that includes Extended Page
-Tables (EPT) and Unrestricted Mode are supported.*
+#### Solution

-To check if your Mac supports the Hypervisor framework, run the following command in a terminal window.
+Check that:

-```console
-$ sysctl kern.hv_support
-```
+- You've installed the correct Docker Desktop for your architecture.
+- Your Mac supports Apple's Hypervisor framework. To check, run the following command in a terminal window.

-If your Mac supports the Hypervisor Framework, the command prints
-`kern.hv_support: 1`.
+  ```console
+  $ sysctl kern.hv_support
+  ```

-If not, the command prints `kern.hv_support: 0`.
+  If your Mac supports the Hypervisor Framework, the command prints `kern.hv_support: 1`.
+
+  If not, the command prints `kern.hv_support: 0`.

See also, [Hypervisor Framework Reference](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/)
@@ -113,63 +222,82 @@ in the Apple documentation, and Docker Desktop [Mac system requirements](/manual

### VPNKit keeps breaking

-In Docker Desktop version 4.19, gVisor replaced VPNKit to enhance the performance of VM networking when using the Virtualization framework on macOS 13 and above.
+#### Cause
+
+In Docker Desktop version 4.19, gVisor replaced VPNKit to enhance the performance of VM networking when using the Virtualization framework on macOS 13 and later.
+
+#### Solution
+
+To continue using VPNKit:

-To continue using VPNKit, add `"networkType":"vpnkit"` to your `settings-store.json` file located at `~/Library/Group Containers/group.com.docker/settings-store.json`.
+1. Open your `settings-store.json` file located at `~/Library/Group Containers/group.com.docker/settings-store.json`.
+2. Add:
+
+   ```json
+   "networkType": "vpnkit"
+   ```
+
+3. Save the file and restart Docker Desktop.
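+If your `settings-store.json` file already contains other settings, the entry belongs inside the top-level JSON object, alongside the existing keys. A minimal sketch (the second key is shown only for illustration):
+
+```json
+{
+  "networkType": "vpnkit",
+  "disableHardwareAcceleration": false
+}
+```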
## Topics for Windows

-### Volumes
+### Docker Desktop fails to start when anti-virus software is installed

-#### Permissions errors on data directories for shared volumes
+#### Cause

-When sharing files from Windows, Docker Desktop sets permissions on [shared volumes](/manuals/desktop/settings-and-maintenance/settings.md#file-sharing)
-to a default value of [0777](https://chmodcommand.com/chmod-0777/)
-(`read`, `write`, `execute` permissions for `user` and for `group`).
+Some anti-virus software might be incompatible with Hyper-V and Microsoft
+Windows 10 builds. The conflict
+typically occurs after a Windows update and
+manifests as an error response from the Docker daemon and a Docker Desktop start failure.

-The default permissions on shared volumes are not configurable. If you are
-working with applications that require permissions different from the shared
-volume defaults at container runtime, you need to either use non-host-mounted
-volumes or find a way to make the applications work with the default file
-permissions.
+#### Solution

-See also,
-[Can I change permissions on shared volumes for container-specific deployment requirements?](/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md#can-i-change-permissions-on-shared-volumes-for-container-specific-deployment-requirements)
-in the FAQs.
+For a temporary workaround, uninstall the anti-virus software, or
+add Docker to the exclusions or exceptions in your anti-virus software.

-#### Volume mounting requires shared folders for Linux containers
+### Permissions errors on data directories for shared volumes

-If you are using mounted volumes and get runtime errors indicating an
-application file is not found, access is denied to a volume mount, or a service
-cannot start, such as when using [Docker Compose](/manuals/compose/gettingstarted.md),
-you might need to turn on [shared folders](/manuals/desktop/settings-and-maintenance/settings.md#file-sharing).
+#### Cause

-With the Hyper-V backend, mounting files from Windows requires shared folders for Linux containers. From **Settings**, select **Shared Folders** and share the folder that contains the
-Dockerfile and volume.
+When sharing files from Windows, Docker Desktop sets permissions on [shared volumes](/manuals/desktop/settings-and-maintenance/settings.md#file-sharing)
+to a default value of [0777](https://chmodcommand.com/chmod-0777/)
+(`read`, `write`, `execute` permissions for `user` and for `group`).

-#### Support for symlinks
+The default permissions on shared volumes are not configurable.

-Symlinks work within and across containers. To learn more, see [How do symlinks work on Windows?](/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md#how-do-symlinks-work-on-windows).
+#### Solution

-#### Avoid unexpected syntax errors, use Unix style line endings for files in containers
+If you are working with applications that require different permissions, either:
+
+- Use non-host-mounted volumes.
+- Find a way to make the applications work with the default file permissions.

-Any file destined to run inside a container must use Unix style `\n` line
-endings. This includes files referenced at the command line for builds and in
-RUN commands in Docker files.
+### Unexpected syntax errors caused by Windows-style line endings
+
+#### Cause
+
+Docker containers expect Unix-style (`\n`) line endings, not Windows-style (`\r\n`). This includes files referenced at the command line for builds and in `RUN` commands in Dockerfiles.
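+For example, a shell script saved with Windows-style line endings might fail inside a Linux container with a misleading error along these lines (the script name and output are illustrative only):
+
+```console
+$ docker run --rm -v "$PWD":/app alpine /bin/sh /app/entrypoint.sh
+/app/entrypoint.sh: set: line 1: illegal option -
+```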
-Docker containers and `docker build` run in a Unix environment, so files in
-containers must use Unix style line endings: `\n`, _not_ Windows style: `\r\n`. Keep this in mind when authoring files such as shell scripts using Windows tools, where the default is likely to be Windows style line endings. These commands ultimately get passed to Unix commands inside a Unix based container (for example, a shell script passed to `/bin/sh`). If Windows style line endings are used, `docker run` fails with syntax errors.

-For an example of this issue and the resolution, see this issue on GitHub:
-[Docker RUN fails to execute shell
-script](https://github.com/moby/moby/issues/24388).
+#### Solution
+
+- Convert files to Unix-style line endings using:
+
+  ```console
+  $ dos2unix script.sh
+  ```
+
+- In VS Code, set line endings to `LF` (Unix) instead of `CRLF` (Windows).
+
+### Path conversion errors on Windows
+
+#### Cause
+
+Unlike Linux, Windows requires explicit path conversion for volume mounting.

-#### Path conversion on Windows

On Linux, the system takes care of mounting a path to another path. For example, when you run the following command on Linux:

@@ -179,7 +307,9 @@ $ docker run --rm -ti -v /home/user/work:/work alpine

It adds a `/work` directory to the target container to mirror the specified path.

-However, on Windows, you must update the source path. For example, if you are using
+#### Solution
+
+Update the source path. For example, if you are using
the legacy Windows shell (`cmd.exe`), you can use the following command:

```console
@@ -195,73 +325,61 @@ Docker Desktop also allows you to use Unix-style path to the appropriate format.

$ docker run --rm -ti -v /c/Users/user/work:/work alpine ls /work
```

-#### Working with Git Bash
+### Docker commands failing in Git Bash

-Git Bash (or MSYS) provides a Unix-like environment on Windows. These tools apply their own
-preprocessing on the command line. For example, if you run the following command in Git Bash, it gives an error:
+#### Error message

```console
$ docker run --rm -ti -v C:\Users\user\work:/work alpine
docker: Error response from daemon: mkdir C:UsersUserwork: Access is denied.
```

-This is because the `\` character has a special meaning in Git Bash. If you are using Git Bash, you must neutralize it using `\\`:
-
```console
-$ docker run --rm -ti -v C:\\Users\\user\\work:/work alpine
+$ docker run --rm -ti -v $(pwd):/work alpine
+docker: Error response from daemon: OCI runtime create failed: invalid mount {Destination:\Program Files\Git\work Type:bind Source:/run/desktop/mnt/host/c/Users/user/work;C Options:[rbind rprivate]}: mount destination \Program Files\Git\work not absolute: unknown.
-```
+#### Cause

-You can work around this issue by using an extra `/`
+Git Bash (or MSYS) provides a Unix-like environment on Windows. These tools apply their own
+preprocessing on the command line.

-```console
-$ docker run --rm -ti -v /$(pwd):/work alpine
-```
+This affects `$(pwd)`, colon-separated paths, and tildes (`~`).
+
+Also, the `\` character has a special meaning in Git Bash.
+
+#### Solution
+
+- Disable Git Bash path conversion temporarily. For example, run the command with MSYS path conversion disabled:
+
+  ```console
+  $ MSYS_NO_PATHCONV=1 docker run --rm -ti -v $(pwd):/work alpine
+  ```
+
+- Use proper path formatting:
+  - Use double backslashes (`\\`) or double forward slashes (`//`) instead of single ones (`\` `/`).
+  - If referencing `$(pwd)`, add an extra `/`, for example `-v /$(pwd):/work`. Script portability is not affected, as Linux treats multiple `/` characters as a single entry.

-Each occurrence of paths on a single line must be neutralized.
-```console
-$ docker run --rm -ti -v /$(pwd):/work alpine ls /work
-ls: C:/Program Files/Git/work: No such file or directory
-```
-
-In this example, The `$(pwd)` is not converted because of the preceding '/'. However, the second '/work' is transformed by the
-POSIX layer before passing it to Docker Desktop. You can also work around this issue by using an extra `/`.
-```console
-$ docker run --rm -ti -v /$(pwd):/work alpine ls //work
-```
-To verify whether the errors are generated from your script, or from another source, you can use an environment variable. For example:
-```console
-$ MSYS_NO_PATHCONV=1 docker run --rm -ti -v $(pwd):/work alpine ls /work
-```
-It only expects the environment variable here. The value doesn't matter.
-In some cases, MSYS also transforms colons to semicolon. Similar conversions can also occur
-when using `~` because the POSIX layer translates it to a DOS path. `MSYS_NO_PATHCONV` also works in this case.

+### Docker Desktop fails due to Virtualization not working
+
+#### Error message
+
+A typical error message is "Docker Desktop - Unexpected WSL error" mentioning the error code
+`Wsl/Service/RegisterDistro/CreateVm/HCS/HCS_E_HYPERV_NOT_INSTALLED`. Manually executing `wsl` commands
+also fails with the same error code.
+
+#### Cause
+
+- Virtualization settings are disabled in the BIOS.
+- Windows Hyper-V or WSL 2 components are missing.
+
+Note that some third-party software, such as Android emulators, disables Hyper-V on install.

-### Virtualization
+#### Solutions

Your machine must have the following features for Docker Desktop to function correctly:

-#### WSL 2 and Windows Home
+##### WSL 2 and Windows Home

1. Virtual Machine Platform
2. [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
@@ -271,7 +389,22 @@ Your machine must have the following features for Docker Desktop to function cor

![WSL 2 enabled](../../images/wsl2-enabled.png)

-#### Hyper-V
+It must be possible to run WSL 2 commands without error, for example:
+
+```console
+PS C:\users\> wsl -l -v
+  NAME            STATE    VERSION
+* Ubuntu          Running  2
+  docker-desktop  Stopped  2
+PS C:\users\> wsl -d docker-desktop echo WSL 2 is working
+WSL 2 is working
+```
+
+If the features are enabled but the commands are not working, first check [Virtualization is turned on](#virtualization-must-be-turned-on)
+then [enable the Hypervisor at Windows startup](#hypervisor-enabled-at-windows-startup) if required. If running Docker
+Desktop in a Virtual Machine, ensure [the hypervisor has nested virtualization enabled](#turn-on-nested-virtualization).
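+If the Virtual Machine Platform or Windows Subsystem for Linux features are missing, you can usually enable them from an elevated PowerShell prompt and then restart. A sketch (see Microsoft's WSL documentation for the current steps):
+
+```console
+PS C:\> dism.exe /online /enable-feature /featurename:VirtualMachinePlatform /all /norestart
+PS C:\> dism.exe /online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux /all /norestart
+```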
+
+##### Hyper-V

On Windows 10 Pro or Enterprise, you can also use Hyper-V with the following features enabled:

@@ -294,10 +427,10 @@ To install Hyper-V manually, see [Install Hyper-V on Windows 10](https://msdn.mi

From the start menu, type **Turn Windows features on or off** and press enter.
In the subsequent screen, verify that Hyper-V is enabled.

-#### Virtualization must be turned on
+##### Virtualization must be turned on

In addition to [Hyper-V](#hyper-v) or [WSL 2](/manuals/desktop/features/wsl/_index.md), virtualization must be turned on. Check the
-Performance tab on the Task Manager. Alternatively, you can type 'systeminfo' into your terminal. If you see 'Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed', then virtualization is enabled.
+Performance tab on the Task Manager. Alternatively, you can type `systeminfo` into your terminal. If you see `Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed`, then virtualization is enabled.

![Task Manager](../../images/virtualization-enabled.png)

@@ -306,9 +439,9 @@ Docker Desktop cannot start. To turn on nested virtualization, see
[Run Docker Desktop for Windows in a VM or VDI environment](/manuals/desktop/setup/vm-vdi.md#turn-on-nested-virtualization).

-#### Hypervisor enabled at Windows startup
+##### Hypervisor enabled at Windows startup

-If you have completed the steps described above and are still experiencing
+If you have completed the previous steps and are still experiencing
Docker Desktop startup issues, this could be because the Hypervisor is
installed, but not launched during Windows startup. Some tools (such as older versions of
Virtual Box) and video game installers turn off hypervisor on boot. To turn it back on:

@@ -319,7 +452,7 @@ Virtual Box) and video game installers turn off hypervisor on boot. To turn it b
You can also refer to the [Microsoft TechNet article](https://social.technet.microsoft.com/Forums/en-US/ee5b1d6b-09e2-49f3-a52c-820aafc316f9/hyperv-doesnt-work-after-upgrade-to-windows-10-1809?forum=win10itprovirt) on Code flow guard (CFG) settings.

-#### Turn on nested virtualization
+##### Turn on nested virtualization

If you are using Hyper-V and you get the following error message when running Docker Desktop in a VDI environment:

@@ -329,29 +462,57 @@
The Virtual Machine Management Service failed to start the virtual machine 'Dock...

Try [enabling nested virtualization](/manuals/desktop/setup/vm-vdi.md#turn-on-nested-virtualization).

+### Docker Desktop with Windows Containers fails with "The media is write protected"

-### Windows containers and Windows Server
+#### Error message

-Docker Desktop is not supported on Windows Server. If you have questions about how to run Windows containers on Windows 10, see
-[Switch between Windows and Linux containers](/manuals/desktop/troubleshoot-and-support/faqs/windowsfaqs.md#how-do-i-switch-between-windows-and-linux-containers).
+`FSCTL_EXTEND_VOLUME \\?\Volume{GUID}: The media is write protected`

-A full tutorial is available in [docker/labs](https://github.com/docker/labs) on
-[Getting Started with Windows Containers](https://github.com/docker/labs/blob/master/windows/windows-containers/README.md).
+#### Cause

-You can install a native Windows binary which allows you to develop and run
-Windows containers without Docker Desktop. However, if you install Docker this way, you cannot develop or run Linux containers.
-If you try to run a Linux container on the native Docker daemon, an error occurs:
+If you're encountering failures when running Docker Desktop with Windows Containers, it might be due to
+a specific Windows configuration policy: FDVDenyWriteAccess.

-```none
-C:\Program Files\Docker\docker.exe:
- image operating system "linux" cannot be used on this platform.
- See 'C:\Program Files\Docker\docker.exe run --help'.
-```
+This policy, when enabled, causes Windows to mount all fixed drives that aren't encrypted by BitLocker as read-only.
+This also affects virtual machine volumes and, as a result, Docker Desktop might not be able to start or run containers
+correctly because it requires read-write access to these volumes.
+
+FDVDenyWriteAccess is a Windows Group Policy setting that, when enabled, prevents write access to fixed data drives that are not protected
+by BitLocker. This is often used in security-conscious environments but can interfere with development tools like Docker.
+In the Windows registry it can be found at `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Policies\Microsoft\FVE\FDVDenyWriteAccess`.
+
+#### Solutions
+
+Docker Desktop does not support running Windows Containers on systems where FDVDenyWriteAccess is enabled. This setting interferes with the
+ability of Docker to mount volumes correctly, which is critical for container functionality.
+
+To use Docker Desktop with Windows Containers, ensure that FDVDenyWriteAccess is disabled. You can check and change this setting in the registry or through Group Policy Editor (`gpedit.msc`) under:
+
+**Computer Configuration** > **Administrative Templates** > **Windows Components** > **BitLocker Drive Encryption** > **Fixed Data Drives** > **Deny write access to fixed drives not protected by BitLocker**
+
+> [!NOTE]
+>
+> Modifying Group Policy settings might require administrator privileges and should comply with your organization's IT policies. If the setting gets reset after some time, this usually means that it was overridden by the centralized configuration of your IT department. Talk to them before making any changes.

### `Docker Desktop Access Denied` error message when starting Docker Desktop

-Docker Desktop displays the **Docker Desktop - Access Denied** error if a Windows user is not part of the **docker-users** group.
+#### Error message
+
+When starting Docker Desktop, the following error appears:
+
+```text
+Docker Desktop - Access Denied
+```
+
+#### Cause
+
+The user is not part of the `docker-users` group, which grants the permissions needed to run Docker Desktop.

-If your admin account is different to your user account, add the **docker-users** group. Run **Computer Management** as an administrator and navigate to **Local Users and Groups** > **Groups** > **docker-users**.
+#### Solution

-Right-click to add the user to the group. Sign out and sign back in for the changes to take effect.
+If your admin account is different from your user account, add your user account to the group:
+1. Run **Computer Management** as an administrator.
+2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**.
+3. Right-click to add the user to the group.
+4. Sign out and sign back in for the changes to take effect.
diff --git a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/workarounds.md b/content/manuals/desktop/troubleshoot-and-support/troubleshoot/workarounds.md
deleted file mode 100644
index 8720454e17e5..000000000000
--- a/content/manuals/desktop/troubleshoot-and-support/troubleshoot/workarounds.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-description: Common workarounds
-keywords: linux, mac, windows, troubleshooting, workarounds, Docker Desktop
-title: Workarounds for common problems
-tags: [ Troubleshooting ]
-weight: 20
-aliases:
- - /desktop/troubleshoot/workarounds/
----
-
-### Reboot
-
-Restart your PC to stop / discard any vestige of the daemon running from the
-previously installed version.
-
-### Unset `DOCKER_HOST`
-
-The `DOCKER_HOST` environmental variable does not need to be set. If you use
-bash, use the command `unset ${!DOCKER_*}` to unset it. For other shells,
-consult the shell's documentation.
-
-### Make sure Docker is running for webserver examples
-
-For the `hello-world-nginx` example and others, Docker Desktop must be
-running to get to the webserver on `http://localhost/`. Make sure that the
-Docker whale is showing in the menu bar, and that you run the Docker commands in
-a shell that is connected to the Docker Desktop Engine. Otherwise, you might start the webserver container but get a "web page
-not available" error when you go to `docker`.
-
-### How to solve `port already allocated` errors
-
-If you see errors like `Bind for 0.0.0.0:8080 failed: port is already allocated`
-or `listen tcp:0.0.0.0:8080: bind: address is already in use` ...
-
-These errors are often caused by some other software on Windows using those
-ports. To discover the identity of this software, either use the `resmon.exe`
-GUI and click "Network" and then "Listening Ports" or in a PowerShell use
-`netstat -aon | find /i "listening "` to discover the PID of the process
-currently using the port (the PID is the number in the rightmost column). Decide
-whether to shut the other process down, or to use a different port in your
-docker app.
-
-### Docker Desktop fails to start when anti-virus software is installed
-
-Some anti-virus software may be incompatible with Hyper-V and Microsoft
-Windows 10 builds. The conflict
-typically occurs after a Windows update and
-manifests as an error response from the Docker daemon and a Docker Desktop start failure.
-
-For a temporary workaround, uninstall the anti-virus software, or
-explore other workarounds suggested on Docker Desktop forums.
diff --git a/content/manuals/desktop/uninstall.md b/content/manuals/desktop/uninstall.md
index d43695354e69..c5b5124ea2b7 100644
--- a/content/manuals/desktop/uninstall.md
+++ b/content/manuals/desktop/uninstall.md
@@ -10,18 +10,18 @@ weight: 210
 >
 > Uninstalling Docker Desktop destroys Docker containers, images, volumes, and
 > other Docker-related data local to the machine, and removes the files generated
-> by the application. To learn how to preserve important data before uninstalling, refer to the [back up and restore data](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) section .
+> by the application. To learn how to preserve important data before uninstalling, refer to the [back up and restore data](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) section.

 {{< tabs >}}
 {{< tab name="Windows" >}}

-To uninstall Docker Desktop from your Windows machine:
+#### From the GUI
1. From the Windows **Start** menu, select **Settings** > **Apps** > **Apps & features**.
2. Select **Docker Desktop** from the **Apps & features** list and then select **Uninstall**.
3. Select **Uninstall** to confirm your selection.

-You can also uninstall Docker Desktop from the CLI:
+#### From the CLI

1. Locate the installer:
   ```console
@@ -37,7 +37,7 @@ You can also uninstall Docker Desktop from the CLI:
   $ start /w "" "Docker Desktop Installer.exe" uninstall
   ```

-After uninstalling Docker Desktop, there may be some residual files left behind which you can remove manually. These are:
+After uninstalling Docker Desktop, some residual files might remain, which you can remove manually. These are:

```console
C:\ProgramData\Docker
@@ -52,121 +52,157 @@ C:\Users\\.docker

{{< /tab >}}
{{< tab name="Mac" >}}

-To uninstall Docker Desktop from your Mac:
+#### From the GUI

-1. From the Docker menu, select the **Troubleshoot** icon in the top-right corner of the Docker Desktop Dashboard and then select **Uninstall**.
-2. Select **Uninstall** to confirm your selection.
+1. Open Docker Desktop.
+2. In the top-right corner of the Docker Desktop Dashboard, select the **Troubleshoot** icon.
+3. Select **Uninstall**.
+4. When prompted, confirm by selecting **Uninstall** again.

-You can also uninstall Docker Desktop from the CLI. Run:
+You can then move the Docker application to the trash.

-```console
-$ /Applications/Docker.app/Contents/MacOS/uninstall
-```
+#### From the CLI
+
+Run:

-You may encounter the following error when uninstalling Docker Desktop using the uninstall command.
```console
$ /Applications/Docker.app/Contents/MacOS/uninstall
-Password:
-Uninstalling Docker Desktop...
-Error: unlinkat /Users//Library/Containers/com.docker.docker/.com.apple.containermanagerd.metadata.plist: operation not permitted
```
-The operation not permitted error is reported either on the file `.com.apple.containermanagerd.metadata.plist` or on the parent directory `/Users//Library/Containers/com.docker.docker/`. This error can be ignored as you have successfully uninstalled Docker Desktop.
-You can remove the directory `/Users//Library/Containers/com.docker.docker/` later by allowing **Full Disk Access** to the terminal application you are using (**System Settings** > **Privacy & Security** > **Full Disk Access**).
-After uninstalling Docker Desktop, there may be some residual files left behind which you can remove:
+
+You can then move the Docker application to the trash.
+
+> [!NOTE]
+> You might encounter the following error when uninstalling Docker Desktop using the uninstall command.
+>
+> ```console
+> $ /Applications/Docker.app/Contents/MacOS/uninstall
+> Password:
+> Uninstalling Docker Desktop...
+> Error: unlinkat /Users//Library/Containers/com.docker.docker/.com.apple.containermanagerd.metadata.plist: operation not permitted
+> ```
+>
+> The operation not permitted error is reported either on the file `.com.apple.containermanagerd.metadata.plist` or on the parent directory `/Users//Library/Containers/com.docker.docker/`. This error can be ignored as you have successfully uninstalled Docker Desktop.
> You can remove the directory `/Users//Library/Containers/com.docker.docker/` later by allowing **Full Disk Access** to the terminal application you are using (**System Settings** > **Privacy & Security** > **Full Disk Access**).
+
+After uninstalling Docker Desktop, some residual files might remain, which you can remove:

```console
$ rm -rf ~/Library/Group\ Containers/group.com.docker
$ rm -rf ~/.docker
```

-You can also move the Docker application to the trash.
+With Docker Desktop version 4.36 and earlier, the following files might also be left on the file system. You can remove these with administrative privileges:
+
+```console
+/Library/PrivilegedHelperTools/com.docker.vmnetd
+/Library/PrivilegedHelperTools/com.docker.socket
+```

{{< /tab >}}
-{{< tab name="Linux" >}}
+{{< tab name="Ubuntu" >}}

-Docker Desktop is removed from a Linux host using the package manager.
+To uninstall Docker Desktop for Ubuntu:

-Once Docker Desktop is removed, users must delete the `credsStore` and `currentContext` properties from the `~/.docker/config.json`.
+1. Remove the Docker Desktop application. Run:

-{{< /tab >}}
-{{< tab name="Ubuntu" >}}
+   ```console
+   $ sudo apt remove docker-desktop
+   ```

-To remove Docker Desktop for Ubuntu, run:
+   This removes the Docker Desktop package itself but doesn’t delete all of its files or settings.

-```console
-$ sudo apt remove docker-desktop
-```
+2. Manually remove leftover files.
+
+   ```console
+   $ rm -r $HOME/.docker/desktop
+   $ sudo rm /usr/local/bin/com.docker.cli
+   $ sudo apt purge docker-desktop
+   ```

-For a complete cleanup, remove configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purge
-the remaining systemd service files.
+   This removes configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purges the remaining systemd service files.

-```console
-$ rm -r $HOME/.docker/desktop
-$ sudo rm /usr/local/bin/com.docker.cli
-$ sudo apt purge docker-desktop
-```
+3. Clean up Docker config settings. In `$HOME/.docker/config.json`, remove the `credsStore` and `currentContext` properties.

-Remove the `credsStore` and `currentContext` properties from `$HOME/.docker/config.json`. Additionally, you must delete any edited configuration files manually.
+   These entries tell Docker where to store credentials and which context is active. If they remain after uninstalling Docker Desktop, they might conflict with a future Docker setup.

{{< /tab >}}
{{< tab name="Debian" >}}

-To remove Docker Desktop for Debian, run:
+To uninstall Docker Desktop for Debian:

-```console
-$ sudo apt remove docker-desktop
-```
+1. Remove the Docker Desktop application:

-For a complete cleanup, remove configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purge
-the remaining systemd service files.
+   ```console
+   $ sudo apt remove docker-desktop
+   ```

-```console
-$ rm -r $HOME/.docker/desktop
-$ sudo rm /usr/local/bin/com.docker.cli
-$ sudo apt purge docker-desktop
-```
+   This removes the Docker Desktop package itself but doesn’t delete all of its files or settings.

-Remove the `credsStore` and `currentContext` properties from `$HOME/.docker/config.json`. Additionally, you must delete any edited configuration files manually.
+2. Manually remove leftover files.
+
+   ```console
+   $ rm -r $HOME/.docker/desktop
+   $ sudo rm /usr/local/bin/com.docker.cli
+   $ sudo apt purge docker-desktop
+   ```
+
+   This removes configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purges the remaining systemd service files.
+
+3. Clean up Docker config settings. In `$HOME/.docker/config.json`, remove the `credsStore` and `currentContext` properties.
+
+   These entries tell Docker where to store credentials and which context is active. If they remain after uninstalling Docker Desktop, they might conflict with a future Docker setup.

{{< /tab >}}
{{< tab name="Fedora" >}}

-To remove Docker Desktop for Fedora, run:
+To uninstall Docker Desktop for Fedora:

-```console
-$ sudo dnf remove docker-desktop
-```
+1. Remove the Docker Desktop application. Run:

-For a complete cleanup, remove configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purge
-the remaining systemd service files.
+   ```console
+   $ sudo dnf remove docker-desktop
+   ```

-```console
-$ rm -r $HOME/.docker/desktop
-$ sudo rm /usr/local/bin/com.docker.cli
-```
+   This removes the Docker Desktop package itself but doesn’t delete all of its files or settings.
+
+2. Manually remove leftover files.
+
+   ```console
+   $ rm -r $HOME/.docker/desktop
+   $ sudo rm /usr/local/bin/com.docker.cli
+   ```

-Remove the `credsStore` and `currentContext` properties from `$HOME/.docker/config.json`. Additionally, you must delete any edited configuration files manually.
+   This removes configuration and data files at `$HOME/.docker/desktop` and the symlink at `/usr/local/bin/com.docker.cli`.
+
+3. Clean up Docker config settings. In `$HOME/.docker/config.json`, remove the `credsStore` and `currentContext` properties.
+
+   These entries tell Docker where to store credentials and which context is active. If they remain after uninstalling Docker Desktop, they might conflict with a future Docker setup.

{{< /tab >}}
{{< tab name="Arch" >}}

-To remove Docker Desktop for Arch, run:
+To uninstall Docker Desktop for Arch:

-```console
-$ sudo pacman -R docker-desktop
-```
+1. Remove the Docker Desktop application. Run:

-For a complete cleanup, remove configuration and data files at `$HOME/.docker/desktop`, the symlink at `/usr/local/bin/com.docker.cli`, and purge
-the remaining systemd service files.
+   ```console
+   $ sudo pacman -Rns docker-desktop
+   ```

-```console
-$ rm -r $HOME/.docker/desktop
-$ sudo rm /usr/local/bin/com.docker.cli
-$ sudo pacman -Rns docker-desktop
-```
+   This removes the Docker Desktop package along with its configuration files and dependencies not required by other packages.
+
+2. Manually remove leftover files.
+
+   ```console
+   $ rm -r $HOME/.docker/desktop
+   ```
+
+   This removes configuration and data files at `$HOME/.docker/desktop`.
+
+3. Clean up Docker config settings. In `$HOME/.docker/config.json`, remove the `credsStore` and `currentContext` properties.

-Remove the `credsStore` and `currentContext` properties from `$HOME/.docker/config.json`. Additionally, you must delete any edited configuration files manually.
+   These entries tell Docker where to store credentials and which context is active. If they remain after uninstalling Docker Desktop, they might conflict with a future Docker setup.

{{< /tab >}}
{{< /tabs >}}
diff --git a/content/manuals/desktop/use-desktop/_index.md b/content/manuals/desktop/use-desktop/_index.md
index b7b309cf6b19..b12be1c3ef6e 100644
--- a/content/manuals/desktop/use-desktop/_index.md
+++ b/content/manuals/desktop/use-desktop/_index.md
@@ -10,32 +10,48 @@ aliases:

When you open Docker Desktop, the Docker Desktop Dashboard displays.
-![Docker Desktop Dashboard on Containers view](../images/dashboard.webp) +![Docker Desktop Dashboard on Containers view](../images/dashboard.png) -The **Containers** view provides a runtime view of all your containers and applications. It allows you to interact with containers and applications, and manage the lifecycle of your applications directly from your machine. This view also provides an intuitive interface to perform common actions to inspect, interact with, and manage your Docker objects including containers and Docker Compose-based applications. For more information, see [Explore running containers and applications](container.md). - -The **Images** view displays a list of your Docker images and allows you to run an image as a container, pull the latest version of an image from Docker Hub, and inspect images. It also displays a summary of image vulnerabilities. In addition, the **Images** view contains clean-up options to remove unwanted images from the disk to reclaim space. If you are logged in, you can also see the images you and your organization have shared on Docker Hub. For more information, see [Explore your images](images.md). - -The **Volumes** view displays a list of volumes and allows you to easily create and delete volumes and see which ones are being used. For more information, see [Explore volumes](volumes.md). - -The **Builds** view lets you inspect your build history and manage builders. By default, it displays a list of all your ongoing and completed builds. [Explore builds](builds.md). +It provides a centralized interface to manage your [containers](container.md), [images](images.md), [volumes](volumes.md), and [builds](builds.md). In addition, the Docker Desktop Dashboard lets you: +- Use [Ask Gordon](/manuals/ai/gordon/_index.md), a personal AI assistant embedded in Docker Desktop and the Docker CLI. It's designed to streamline your workflow and help you make the most of the Docker ecosystem. - Navigate to the **Settings** menu to configure your Docker Desktop settings. Select the **Settings** icon in the Dashboard header. - Access the **Troubleshoot** menu to debug and perform restart operations. Select the **Troubleshoot** icon in the Dashboard header. - Be notified of new releases, installation progress updates, and more in the **Notifications center**. Select the bell icon in the bottom-right corner of the Docker Desktop Dashboard to access the notification center. - Access the **Learning center** from the Dashboard header. It helps you get started with quick in-app walkthroughs and provides other resources for learning about Docker. For a more detailed guide about getting started, see [Get started](/get-started/introduction/_index.md). -- Get to the [Docker Scout](../../scout/_index.md) dashboard. -- Check the status of Docker services. - Access [Docker Hub](/manuals/docker-hub/_index.md) to search, browse, pull, run, or view details of images. +- Get to the [Docker Scout](../../scout/_index.md) dashboard. +- Navigate to [Docker Extensions](/manuals/extensions/_index.md). + +## Docker terminal + +From the Docker Dashboard footer, you can use the integrated terminal directly within Docker Desktop. + +The integrated terminal: + +- Persists your session if you navigate to another + part of the Docker Desktop Dashboard and then return. +- Supports copy, paste, search, and clearing your session. 
+
+### Open the integrated terminal
+
+To open the integrated terminal, either:
+
+- Hover over your running container and under the **Actions** column, select the **Show container actions**
+  menu. From the drop-down menu, select **Open in terminal**.
+- Or, select the **Terminal** icon located in the bottom-right corner, next to the version number.
+
+To use your external terminal, navigate to the **General** tab in **Settings**
+and select the **System default** option under **Choose your terminal**.

## Quick search

-From the Docker Desktop Dashboard you can use Quick Search, which is located in the Dashboard header, to search for:
+Use Quick Search, which is located in the Docker Dashboard header, to search for:

- Any container or Compose application on your local system. You can see an overview of associated environment variables or perform quick actions, such as start, stop, or delete.

@@ -49,9 +65,9 @@ From the Docker Desktop Dashboard you can use Quick Search, which is located in

## The Docker menu

-Docker Desktop also provides an easy-access tray icon that appears in the taskbar and is referred to as the Docker menu {{< inline-image src="../../assets/images/whale-x.svg" alt="whale menu" >}}.
+Docker Desktop also includes a tray icon, referred to as the Docker menu {{< inline-image src="../../assets/images/whale-x.svg" alt="whale menu" >}}, for quick access.

-To display the Docker menu, select the {{< inline-image src="../../assets/images/whale-x.svg" alt="whale menu" >}} icon. It displays the following options:
+Select the {{< inline-image src="../../assets/images/whale-x.svg" alt="whale menu" >}} icon in your taskbar to open options such as:

- **Dashboard**. This takes you to the Docker Desktop Dashboard.
- **Sign in/Sign up**
diff --git a/content/manuals/desktop/use-desktop/builds.md b/content/manuals/desktop/use-desktop/builds.md
index 24b17e952443..c31b1e611409 100644
--- a/content/manuals/desktop/use-desktop/builds.md
+++ b/content/manuals/desktop/use-desktop/builds.md
@@ -6,33 +6,28 @@ keywords: Docker Dashboard, manage, gui, dashboard, builders, builds
weight: 40
---

-![Builds view in Docker Desktop](../images/builds-view.webp)
+The **Builds** view provides an interactive interface for inspecting build history, monitoring active builds, and managing builders directly in Docker Desktop.

-The **Builds** view is a simple interface that lets you inspect your build
-history and manage builders using Docker Desktop.
-
-Opening the **Builds** view in Docker Desktop displays a list of completed builds.
-By default, the list is sorted by date, showing the most recent builds at the top.
-You can switch to **Active builds** to view any ongoing builds.
-
-![Build UI screenshot active builds](../images/build-ui-active-builds.webp)
+By default, the **Build history** tab displays a list of completed builds, sorted by date (newest first). Switch to the **Active builds** tab to view ongoing builds.

If you're connected to a cloud builder through [Docker Build Cloud](../../build-cloud/_index.md), the Builds view also lists any active or completed cloud builds by other team members connected to the same cloud builder.

+> [!NOTE]
+>
+> When building Windows container images using the `docker build` command, the legacy builder is used, which does not populate the **Builds** view.
> To switch to using BuildKit, you can either:
> - Set `DOCKER_BUILDKIT=1` in the build command, such as `DOCKER_BUILDKIT=1 docker build .`, or
> - Use the `docker buildx build` command.
+
## Show build list

-Select the **Builds** view in the Docker Desktop Dashboard to open the build list.
+Open the **Builds** view from the Docker Dashboard to access:

-The build list shows your completed and ongoing builds. The **Build history**
-tab shows completed historical builds, and from here you can inspect the build
-logs, dependencies, traces, and more. The **Active builds** tab shows builds
-that are currently running.
+- **Build history**: Completed builds with access to logs, dependencies, traces, and more.
+- **Active builds**: Builds currently in progress.

-The list shows builds for your active, running builders. It doesn't list builds
-for inactive builders: builders that you've removed from your system, or
-builders that have been stopped.
+Only builds from active, running builders are listed. Builds from removed or stopped builders are not shown.

### Builder settings

@@ -47,7 +42,9 @@ Docker Desktop settings.

The **Import builds** button lets you import build records for builds by other
people, or builds in a CI environment. When you've imported a build record, it
gives you full access to the logs, traces, and other data for that build,
-directly in Docker Desktop. The [build summary](/manuals/build/ci/github-actions/build-summary.md)
+directly in Docker Desktop.
+
+The [build summary](/manuals/build/ci/github-actions/build-summary.md)
for the `docker/build-push-action` and `docker/bake-action` GitHub Actions
includes a link to download the build records, for inspecting CI jobs with
Docker Desktop.

@@ -63,8 +60,6 @@ If you're inspecting a multi-platform build, the drop-down menu in the
top-right of this tab lets you filter the information down to a specific
platform:

-![Platform filter](../images/build-ui-platform-menu.webp?w=400)
-
The **Source details** section shows information about the frontend [frontend](/manuals/build/buildkit/frontend.md) and, if available, the source code repository used for the build.

@@ -79,8 +74,6 @@ showing a breakdown of the build execution from various angles.

- **Cache usage** shows the extent to which build operations were cached.
- **Parallel execution** shows how much of the build execution time was spent running steps in parallel.

-![Build timing charts](../images/build-ui-timing-chart.webp)
-
The chart colors and legend keys describe the different build operations. Build operations are defined as follows:

@@ -168,8 +161,6 @@ If the build failed, an **Error** tab displays instead of the **Source** tab.
The error message is inlined in the Dockerfile source, indicating where the
failure happened and why.

-![Build error displayed inline in the Dockerfile](../images/build-ui-error.webp)
-
### Build logs

The **Logs** tab displays the build logs.

@@ -193,20 +184,16 @@ helping you identify patterns and shifts in build operations over time.
For instance, significant spikes in build duration or a high number of cache
misses could signal opportunities for optimizing the Dockerfile.

-![Build history chart](../images/build-ui-history.webp)
-
You can navigate to and inspect a related build by selecting it in the chart,
or using the **Past builds** list below the chart.
## Manage builders

-The **Builder settings** view in the Docker Desktop settings lets you:
+The **Builder** tab in **Settings** lets you:

- Inspect the state and configuration of active builders
- Start and stop a builder
- Delete build history
- Add or remove builders (or connect and disconnect, in the case of cloud builders)

-![Builder settings drop-down](../images/build-ui-manage-builders.webp)
-
For more information about managing builders, see
[Change settings](/manuals/desktop/settings-and-maintenance/settings.md#builders)
diff --git a/content/manuals/desktop/use-desktop/container.md b/content/manuals/desktop/use-desktop/container.md
index 40e3350cf64b..2a4ff86a54eb 100644
--- a/content/manuals/desktop/use-desktop/container.md
+++ b/content/manuals/desktop/use-desktop/container.md
@@ -6,20 +6,20 @@ linkTitle: Containers
weight: 10
---

-The **Containers** view lists all your running containers and applications. You must have running or stopped containers and applications to see them listed.
+The **Containers** view lists all running and stopped containers and applications. It provides a clean interface to manage the lifecycle of your containers, interact with running applications, and inspect Docker objects, including Docker Compose apps.

## Container actions

-Use the **Search** field to search for any specific container.
+Use the **Search** field to find a specific container by name.

-From the **Containers** view you can perform the following actions:
-- Pause/Resume
-- Stop/Start/Restart
+From the **Containers** view you can:
+- Start, stop, pause, resume, or restart containers
 - View image packages and CVEs
-- Delete
+- Delete containers
 - Open the application in VS code
 - Open the port exposed by the container in a browser
-- Copy docker run. This lets you share container run details or modify certain parameters.
+- Copy the `docker run` command for reuse or modification
+- Use [Docker Debug](#execdebug)

## Resource usage

@@ -31,11 +31,11 @@ When you [inspect a container](#inspect-a-container), the **Stats** tab displays

You can obtain detailed information about the container when you select it.

-From here, you can use the quick action buttons to perform various actions such as pause, resume, start or stop, or explore the **Logs**, **Inspect**, **Bind mounts**, **Exec**, **Files**, and **Stats** tabs.
+From here, you can use the quick action buttons to perform various actions such as pause, resume, start or stop, or explore the **Logs**, **Inspect**, **Bind mounts**, **Debug**, **Files**, and **Stats** tabs.

### Logs

-Select **Logs** to see logs from the container. You can also:
+Select **Logs** to view output from the container in real time. While viewing logs, you can:

- Use `Cmd + f`/`Ctrl + f` to open the search bar and find specific entries.
  Search matches are highlighted in yellow.
@@ -43,50 +43,32 @@ Select **Logs** to see logs from the container. You can also:
  respectively.
- Use the **Copy** icon in the top right-hand corner to copy all the logs to
  your clipboard.
-- Automatically copy any logs content by highlighting a few lines or a section
-  of the logs.
+- Show timestamps.
- Use the **Clear terminal** icon in the top right-hand corner to clear the
  logs terminal.
- Select and view external links that may be in your logs.

+You can refine your view by:
+
+- Filtering logs for specific containers, if you're running a multi-container application.
+- Using regular expressions or exact-match search terms.
+
### Inspect

Select **Inspect** to view low-level information about the container. It displays the local path, version number of the image, SHA-256, port mapping, and other details.

-### Integrated terminal
+### Exec/Debug

-From the **Exec** tab, you can use the integrated terminal, on a running
-container, directly within Docker Desktop. You are able to quickly run commands
-within your container so you can understand its current state or debug when
-something goes wrong.
+If you have not enabled Docker Debug in settings, the **Exec** tab displays. It lets you quickly run commands within your running container.

-Using the integrated terminal is the same as running one of the following commands:
+Using the **Exec** tab is the same as running one of the following commands:

- `docker exec -it /bin/sh`
- `docker exec -it cmd.exe` when accessing Windows containers
-- `docker debug ` when using debug mode
-
-The integrated terminal:
-
-- Persists your session and **Debug mode** setting if you navigate to another
-  part of the Docker Desktop Dashboard and then return.
-- Supports copy, paste, search, and clearing your session.
-- When not using debug mode, it automatically detects the default user for a
-  running container from the image's Dockerfile. If no user is specified, or
-  you're using debug mode, it defaults to `root`.
-
-#### Open the integrated terminal
-
-To open the integrated terminal, either:
-
-- Hover over your running container and under the **Actions** column, select the **Show container actions**
-  menu. From the drop-down menu, select **Open in terminal**.
-- Or, select the container and then select the **Exec** tab.

-To use your external terminal, navigate to the **General** tab in **Settings**
-and select the **System default** option under **Choose your terminal**.
+For more details, see the [`docker exec` CLI reference](/reference/cli/docker/exec/).

-#### Open the integrated terminal in debug mode
+If you have enabled Docker Debug in settings, or toggled on **Debug mode** to the right of the tab options, the **Debug** tab displays.

Debug mode requires a [Pro, Team, or Business subscription](/subscription/details/).

Debug mode has several advantages, such as:
@@ -95,7 +77,7 @@ Debug mode requires a [Pro, Team, or Business subscription](/subscription/detail
- The ability to access containers that don't have a shell, for example, slim or
  distroless containers.

-To open the integrated terminal in debug mode:
+To use debug mode:

1. Sign in to Docker Desktop with an account that has a Pro, Team, or Business subscription.
@@ -103,11 +85,9 @@ To open the integrated terminal in debug mode:
   - Hover over your running container and under the **Actions** column, select the **Show container actions**
     menu. From the drop-down menu, select **Use Docker Debug**.
-   - Or, select the container and then select the **Debug** tab. If the
-     **Debug** tab isn't visible, select the **Exec** tab and then enable the
-     **Debug mode** setting.
+   - Or, select the container and then select the **Debug** tab.

-To use debug mode by default when accessing the integrated terminal, navigate to
+To use debug mode by default, navigate to
the **General** tab in **Settings** and select the **Enable Docker Debug by default** option.
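+As a quick reference, the **Exec** and **Debug** tabs correspond to the following CLI commands for a hypothetical running container named `web` (a placeholder name):
+
+```console
+$ docker exec -it web /bin/sh
+$ docker debug web
+```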
diff --git a/content/manuals/desktop/use-desktop/images.md b/content/manuals/desktop/use-desktop/images.md
index c44b7b477d8f..b99038c6d25f 100644
--- a/content/manuals/desktop/use-desktop/images.md
+++ b/content/manuals/desktop/use-desktop/images.md
@@ -6,6 +6,9 @@ linkTitle: Images
weight: 20
---

+
+The **Images** view displays a list of your Docker images and allows you to run an image as a container, pull the latest version of an image from Docker Hub, and inspect images. It also displays a summary of image vulnerabilities. In addition, the **Images** view contains clean-up options to remove unwanted images from the disk to reclaim space. If you are logged in, you can also see the images you and your organization have shared on Docker Hub.
+
The **Images** view lets you manage Docker images without having to use the CLI. By default, it displays a list of all Docker images on your local disk.
You can also view Hub images once you have signed in to Docker Hub. This allows you to collaborate with your team and manage your images directly through Docker Desktop.
@@ -87,10 +90,10 @@ To remove individual images, select the bin icon.

The **Images** view also allows you to manage and interact with images in Docker Hub repositories. By default, when you go to **Images** in Docker Desktop, you see a list of images that exist in your local image store.
-The **Local** and **Hub repositories** tabs near the top toggles between viewing images in your local image store,
+The **Local** and **Docker Hub repositories** tabs near the top toggle between viewing images in your local image store,
and images in remote Docker Hub repositories that you have access to.

-Switching to the **Hub repositories** tab prompts you to sign in to your Docker Hub account, if you're not already signed in.
+Switching to the **Docker Hub repositories** tab prompts you to sign in to your Docker Hub account, if you're not already signed in.
When signed in, it shows you a list of images in Docker Hub organizations and repositories that you have access to.

Select an organization from the drop-down to view a list of repositories for that organization.
diff --git a/content/manuals/desktop/use-desktop/pause.md b/content/manuals/desktop/use-desktop/pause.md
index 2bfdc9d6c28b..e7b882097f0b 100644
--- a/content/manuals/desktop/use-desktop/pause.md
+++ b/content/manuals/desktop/use-desktop/pause.md
@@ -5,13 +5,12 @@ title: Pause Docker Desktop
weight: 60
---

-When Docker Desktop is paused, the Linux VM running Docker Engine is paused, the current state of all your containers are saved in memory, and all processes are frozen. This reduces the CPU and memory usage and helps you retain a longer battery life on your laptop.
+Pausing Docker Desktop temporarily suspends the Linux VM running Docker Engine. This saves the current state of all containers in memory and freezes all running processes, significantly reducing CPU and memory usage, which is helpful for conserving battery on laptops.

-You can manually pause Docker Desktop by selecting the Docker menu {{< inline-image src="../images/whale-x.svg" alt="whale menu" >}} and then **Pause**. To manually resume Docker Desktop, select the **Resume** option in the Docker menu, or run any Docker CLI command.
+To pause Docker Desktop, select the **Pause** icon to the left of the footer in the Docker Dashboard. To manually resume Docker Desktop, select the **Resume** option in the Docker menu, or run any Docker CLI command.
When you manually pause Docker Desktop, a paused status displays on the Docker menu and on the Docker Desktop Dashboard. You can still access the **Settings** and the **Troubleshoot** menu. > [!TIP] > -> The Resource Saver feature, available in Docker Desktop version 4.24 and later, is enabled by default and provides better -> CPU and memory savings than the manual Pause feature. See [here](resource-saver.md) for more info. +> The Resource Saver feature is enabled by default and provides better CPU and memory savings than the manual Pause feature. See [Resource Saver mode](resource-saver.md) for more info. diff --git a/content/manuals/desktop/use-desktop/resource-saver.md b/content/manuals/desktop/use-desktop/resource-saver.md index 4b14ec6dd2c3..10162288b357 100644 --- a/content/manuals/desktop/use-desktop/resource-saver.md +++ b/content/manuals/desktop/use-desktop/resource-saver.md @@ -6,7 +6,7 @@ linkTitle: Resource Saver mode weight: 50 --- -Resource Saver is a new feature available in Docker Desktop version 4.24 and later. It significantly reduces Docker +Resource Saver mode significantly reduces Docker Desktop's CPU and memory utilization on the host by 2 GBs or more, by automatically stopping the Docker Desktop Linux VM when no containers are running for a period of time. The default time is set to 5 minutes, but this can be adjusted to suit your needs. @@ -15,7 +15,7 @@ With Resource Saver mode, Docker Desktop uses minimal system resources when it's allowing you to save battery life on your laptop and improve your multi-tasking experience. -## How to configure Resource Saver +## Configure Resource Saver Resource Saver is enabled by default but can be disabled by navigating to the **Resources** tab, in **Settings**. You can also configure the idle timer as shown below. @@ -74,10 +74,3 @@ users enable WSL's `autoMemoryReclaim` feature as described in the [Docker Desktop WSL docs](/manuals/desktop/features/wsl/_index.md). Finally, since Docker Desktop does not stop the Linux VM on WSL, exit from Resource Saver mode is immediate (there's no exit delay). - -## Feedback - -To give feedback or report any bugs you may find, create an issue on the appropriate Docker Desktop GitHub repository: - -- [for-mac](https://github.com/docker/for-mac) -- [for-win](https://github.com/docker/for-win) diff --git a/content/manuals/desktop/use-desktop/volumes.md b/content/manuals/desktop/use-desktop/volumes.md index b065d83a5eff..1486ff4f8dbf 100644 --- a/content/manuals/desktop/use-desktop/volumes.md +++ b/content/manuals/desktop/use-desktop/volumes.md @@ -6,10 +6,7 @@ linkTitle: Volumes weight: 30 --- -The **Volumes** view in Docker Desktop Dashboard lets you create, delete, and perform -other actions on your [volumes](/manuals/engine/storage/volumes.md). You can also see -which volumes are being used as well as inspect the files and folders in your -volumes. +The **Volumes** view in Docker Desktop lets you create, inspect, delete, clone, empty, export, and import [Docker volumes](/manuals/engine/storage/volumes.md). You can also browse files and folders in volumes and see which containers are using them. ## View your volumes @@ -119,7 +116,7 @@ To empty a volume: ## Export a volume -You can export the content of a volume to a local file, a local image, an to an +You can export the content of a volume to a local file, a local image, and to an image in Docker Hub, or to a supported cloud provider. 
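If you want a rough CLI equivalent of exporting a volume's content to a local file, one common pattern is to mount the volume into a throwaway container and archive it. This is a sketch only, assuming a volume named `my-volume`, and is not the Dashboard's export feature:

```console
$ docker run --rm \
    -v my-volume:/source:ro \
    -v "$(pwd)":/backup \
    alpine tar -czf /backup/my-volume.tar.gz -C /source .
```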
When exporting content from a volume used by one or more running containers, the containers are temporarily stopped while Docker exports the content, and then restarted when diff --git a/content/manuals/dhi/_index.md b/content/manuals/dhi/_index.md new file mode 100644 index 000000000000..45cadeb1dcfb --- /dev/null +++ b/content/manuals/dhi/_index.md @@ -0,0 +1,50 @@ +--- +title: Docker Hardened Images +description: Secure, minimal, and production-ready base images +weight: 13 +params: + sidebar: + badge: + color: green + text: New + group: Products + grid_sections: + - title: Quickstart + description: Follow a step-by-step guide to explore, mirror, and run a Docker Hardened Image. + icon: rocket_launch + link: /dhi/get-started/ + - title: About + description: Learn what Docker Hardened Images are, how they're built, and what sets them apart from typical base images. + icon: info + link: /dhi/about/ + - title: Features + description: Discover the security, compliance, and enterprise-readiness features built into Docker Hardened Images. + icon: lock + link: /dhi/features/ + - title: How-tos + description: Step-by-step guides for using, verifying, scanning, and migrating to Docker Hardened Images. + icon: play_arrow + link: /dhi/how-to/ + - title: Core concepts + description: Understand the secure supply chain principles that make Docker Hardened Images production-ready. + icon: fact_check + link: /dhi/core-concepts/ + - title: Troubleshoot + description: Resolve common issues with building, running, or debugging Docker Hardened Images. + icon: help_center + link: /dhi/troubleshoot/ +--- + +{{< summary-bar feature_name="Docker Hardened Images" >}} + +Docker Hardened Images (DHIs) are minimal, secure, and production-ready +container base and application images maintained by Docker. Designed to reduce +vulnerabilities and simplify compliance, DHIs integrate easily into your +existing Docker-based workflows with little to no retooling required. + +Explore the sections below to get started with Docker Hardened Images, integrate +them into your workflow, and learn what makes them secure and enterprise-ready. + +{{< grid + items="grid_sections" +>}} diff --git a/content/manuals/dhi/about/_index.md b/content/manuals/dhi/about/_index.md new file mode 100644 index 000000000000..a449c40fab25 --- /dev/null +++ b/content/manuals/dhi/about/_index.md @@ -0,0 +1,35 @@ +--- +title: About +description: Learn about Docker Hardened Images, their purpose, how they are built and tested, and the shared responsibility model for security. +weight: 5 +params: + grid_about: + - title: What are hardened images and why use them? + description: Learn what a hardened image is, how Docker Hardened Images are built, what sets them apart from typical base and application images, and why you should use them. + icon: info + link: /dhi/about/what/ + - title: Image testing + description: See how Docker Hardened Images are automatically tested for standards compliance, functionality, and security. + icon: science + link: /dhi/about/test/ + - title: Responsibility overview + description: Understand Docker's role and your responsibilities when using Docker Hardened Images as part of your secure software supply chain. + icon: group + link: /dhi/about/responsibility/ + - title: Image types + description: Learn about the different image types, distributions, and variants offered in the Docker Hardened Images catalog. 
+ icon: view_module + link: /dhi/about/available/ +--- + +Docker Hardened Images (DHIs) are purpose-built for security, compliance, and +reliability in modern software supply chains. This section explains what makes +these images different from standard base and application images, how they're +built and tested, and how Docker and users share responsibility in securing +containerized workloads. + +## Learn about Docker Hardened Images + +{{< grid + items="grid_about" +>}} \ No newline at end of file diff --git a/content/manuals/dhi/about/available.md b/content/manuals/dhi/about/available.md new file mode 100644 index 000000000000..269b6eaeedb6 --- /dev/null +++ b/content/manuals/dhi/about/available.md @@ -0,0 +1,94 @@ +--- +linktitle: Image types +title: Available types of Docker Hardened Images +description: Learn about the different image types, distributions, and variants offered in the Docker Hardened Images catalog. +keywords: docker hardened images, distroless containers, distroless images, docker distroless, alpine base image, debian base image, development containers, runtime containers, secure base image, multi-stage builds +weight: 20 +--- + +Docker Hardened Images (DHI) is a comprehensive catalog of +security-hardened container images built to meet diverse +development and production needs. + +## Framework and application images + +DHI includes a selection of popular frameworks and application images, each +hardened and maintained to ensure security and compliance. These images +integrate seamlessly into existing workflows, allowing developers to focus on +building applications without compromising on security. + +For example, you might find repositories like the following in the DHI catalog: + +- `node`: framework for Node.js applications +- `python`: framework for Python applications +- `nginx`: web server image + +## Compatibility options + +Docker Hardened Images are available in different base image options, giving you +flexibility to choose the best match for your environment and workload +requirements: + +- Debian-based images: A good fit if you're already working in glibc-based + environments. Debian is widely used and offers strong compatibility across + many language ecosystems and enterprise systems. + +- Alpine-based images: A smaller and more lightweight option using musl libc. + These images tend to be small and are therefore faster to pull and have a + reduced footprint. + +Each image maintains a minimal and secure runtime layer by removing +non-essential components like shells, package managers, and debugging tools. +This helps reduce the attack surface while retaining compatibility with common +runtime environments. + +Example tags include: + +- `3.9.23-alpine3.21`: Alpine-based image for Python 3.9.23 +- `3.9.23-debian12`: Debian-based image for Python 3.9.23 + +If you're not sure which to choose, start with the base you're already familiar +with. Debian tends to offer the broadest compatibility. + +## Development and runtime variants + +To accommodate different stages of the application lifecycle, DHI offers all +language framework images and select application images in two variants: + +- Development (dev) images: Equipped with necessary development tools and +libraries, these images facilitate the building and testing of applications in a +secure environment. They include a shell, package manager, a root user, and +other tools needed for development. 
+ +- Runtime images: Stripped of development tools, these images contain only the +essential components needed to run applications, ensuring a minimal attack +surface in production. + +This separation supports multi-stage builds, enabling developers to compile code +in a secure build environment and deploy it using a lean runtime image. + +For example, you might find tags like the following in a DHI repository: + +- `3.9.23-debian12`: runtime image for Python 3.9.23 +- `3.9.23-debian12-dev`: development image for Python 3.9.23 + +## FIPS variants + +Some Docker Hardened Images include a `-fips` variant. These variants use +cryptographic modules that have been validated under [FIPS +140](../core-concepts/fips.md), a U.S. government standard for secure +cryptographic operations. + +FIPS variants are designed to help organizations meet regulatory and compliance +requirements related to cryptographic use in sensitive or regulated +environments. + +You can recognize FIPS variants by their tag that includes `-fips`. + +For example: +- `3.13-fips`: FIPS variant of the Python 3.13 image +- `3.9.23-debian12-fips`: FIPS variant of the Debian-based Python 3.9.23 image + +FIPS variants can be used in the same way as any other Docker Hardened Image and +are ideal for teams operating in regulated industries or under compliance +frameworks that require cryptographic validation. diff --git a/content/manuals/dhi/about/responsibility.md b/content/manuals/dhi/about/responsibility.md new file mode 100644 index 000000000000..eebc26942292 --- /dev/null +++ b/content/manuals/dhi/about/responsibility.md @@ -0,0 +1,66 @@ +--- +title: Understanding roles and responsibilities for Docker Hardened Images +linkTitle: Responsibility overview +description: Understand the division of responsibilities between Docker, upstream projects, and you when using Docker Hardened Images. +keywords: software supply chain security, signed sbom, vex document, container provenance, image attestation +weight: 46 +--- + +Docker Hardened Images (DHIs) are curated and maintained by Docker, and built +using upstream open source components. To deliver security, reliability, and +compliance, responsibilities are shared among three groups: + +- Upstream maintainers: the developers and communities responsible for the + open source software included in each image. +- Docker: the provider of hardened, signed, and maintained container images. +- You (the customer): the consumer who runs and, optionally, customizes DHIs + in your environment. + +This topic outlines who handles what, so you can use DHIs effectively and +securely. + +## Releases + +- Upstream: Publishes and maintains official releases of the software + components included in DHIs. This includes versioning, changelogs, and + deprecation notices. +- Docker: Builds, hardens, and signs Docker Hardened Images based on + upstream versions. Docker maintains these images in line with upstream release + timelines and internal policies. +- You: Ensure you're staying on supported versions of DHIs and upstream + projects. Using outdated or unsupported components can introduce security + risk. + +## Patching + +- Upstream: Maintains and updates the source code for each component, + including fixing vulnerabilities in libraries and dependencies. +- Docker: Rebuilds and re-releases images with upstream patches applied. + Docker also monitors for vulnerabilities and rapidly publishes updates to + affected images. 
+- You: Apply DHI updates in your environments and patch any software or + dependencies you install on top of the base image. + +## Testing + +- Upstream: Defines the behavior and functionality of the original software, + and is responsible for validating core features. +- Docker: Validates that DHIs start, run, and behave consistently with + upstream expectations. Docker also runs security scans and includes a [testing + attestation](../core-concepts/attestations.md) with each image. +- You: Test your application on top of DHIs and validate that any changes or + customizations function as expected in your environment. + +## Security and compliance + +- Docker: Publishes signed SBOMs, VEX documents, provenance data, and CVE + scan results with each image to support compliance and supply chain security. +- You: Integrate DHIs into your security and compliance workflows, including + vulnerability management and auditing. + +## Summary + +Docker Hardened Images give you a secure foundation, complete with signed +metadata and upstream transparency. Your role is to make informed use of these +images, apply updates promptly, and validate that your configurations and +applications meet your internal requirements. \ No newline at end of file diff --git a/content/manuals/dhi/about/test.md b/content/manuals/dhi/about/test.md new file mode 100644 index 000000000000..8dc3aa625ef2 --- /dev/null +++ b/content/manuals/dhi/about/test.md @@ -0,0 +1,148 @@ +--- +title: How Docker Hardened Images are tested +linktitle: Image testing +description: See how Docker Hardened Images are automatically tested for standards compliance, functionality, and security. +keywords: docker scout, test attestation, cosign verify, image testing, vulnerability scan +weight: 45 +--- + +Docker Hardened Images (DHIs) are designed to be secure, minimal, and +production-ready. To ensure their reliability and security, Docker employs a +comprehensive testing strategy, which you can independently verify using signed +attestations and open tooling. + +Every image is tested for standards compliance, functionality, and security. The +results of this testing are embedded as signed attestations, which can be +[inspected and verified](#view-and-verify-the-test-attestation) programmatically +using the Docker Scout CLI. + +## Testing strategy overview + +The testing process for DHIs focuses on two main areas: + +- Image standards compliance: Ensuring that each image adheres to strict size, + security, and compatibility standards. +- Application functionality: Verifying that applications within the images + function correctly. + +## Image standards compliance + +Each DHI undergoes rigorous checks to meet the following standards: + +- Minimal attack surface: Images are built to be as small as possible, removing + unnecessary components to reduce potential vulnerabilities. +- Near-zero known CVEs: Images are scanned using tools like Docker Scout to + ensure they are free from known Common Vulnerabilities and Exposures (CVEs). +- Multi-architecture support: DHIs are built for multiple architectures + (`linux/amd64` and `linux/arm64`) to ensure broad compatibility. +- Kubernetes compatibility: Images are tested to run seamlessly within + Kubernetes clusters, ensuring they meet the requirements for container + orchestration environments. + +## Application functionality testing + +Docker tests Docker Hardened Images to ensure they behave as expected in typical +usage scenarios. 
This includes verifying that: + +- Applications start and run successfully in containerized environments. +- Runtime behavior aligns with upstream expectations. +- Build variants (like `-dev` images) support common development and build tasks. + +The goal is to ensure that DHIs work out of the box for the most common use +cases while maintaining the hardened, minimal design. + +## Automated testing and CI/CD integration + +Docker integrates automated testing into its Continuous Integration/Continuous +Deployment (CI/CD) pipelines: + +- Automated scans: Each image build triggers automated scans for vulnerabilities + and compliance checks. +- Reproducible builds: Build processes are designed to be reproducible, ensuring + consistency across different environments. +- Continuous monitoring: Docker continuously monitors for new vulnerabilities + and updates images accordingly to maintain security standards. + +## Testing attestation + +Docker provides a test attestation that details the testing and validation +processes each DHI has undergone. + +### View and verify the test attestation + +You can view and verify this attestation using the Docker Scout CLI. + +1. Use the `docker scout attest get` command with the test predicate type: + + ```console + $ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --predicate \ + <namespace>/dhi-<image>:<tag> --platform <platform> + ``` + + For example: + + ```console + $ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --predicate \ + docs/dhi-python:3.13 --platform linux/amd64 + ``` + + The output contains a list of tests and their results. + + Example output: + + ```console + ✓ SBOM obtained from attestation, 101 packages found + ✓ Provenance obtained from attestation + { + "reportFormat": "CTRF", + "results": { + "summary": { + "failed": 0, + "passed": 1, + "skipped": 0, + "start": 1749216533, + "stop": 1749216574, + "tests": 1 + }, + "tests": [ + { + ... + ``` + +2. Verify the test attestation signature. To ensure the attestation is authentic + and signed by Docker, run: + + ```console + $ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --verify \ + <namespace>/dhi-<image>:<tag> --platform <platform> + ``` + + Example output: + + ```console + ✓ SBOM obtained from attestation, 101 packages found + ✓ Provenance obtained from attestation + ✓ cosign verify registry.scout.docker.com/docker/dhi-python@sha256:70c8299c4d3cb4d5432734773c45ae58d8acc2f2f07803435c65515f662136d5 \ + --key https://registry.scout.docker.com/keyring/dhi/latest.pub --experimental-oci11 + + Verification for registry.scout.docker.com/docker/dhi-python@sha256:70c8299c4d3cb4d5432734773c45ae58d8acc2f2f07803435c65515f662136d5 -- + The following checks were performed on each of these signatures: + - The cosign claims were validated + - Existence of the claims in the transparency log was verified offline + - The signatures were verified against the specified public key + + i Signature payload + ... + ``` + +If the attestation is valid, Docker Scout confirms the signature and shows +the matching `cosign verify` command. + +To view other attestations, such as SBOMs or vulnerability reports, see [Verify +an image](../how-to/verify.md). diff --git a/content/manuals/dhi/about/what.md b/content/manuals/dhi/about/what.md new file mode 100644 index 000000000000..3c4de9a4386e --- /dev/null +++ b/content/manuals/dhi/about/what.md @@ -0,0 +1,95 @@ +--- +title: What are hardened images and why use them?
+linktitle: Hardened images +description: Learn what a hardened image is, why it matters, and how Docker Hardened Images offer stronger security, compliance, and operational efficiency. +keywords: hardened container image, docker hardened images, distroless container, slsa build level 3, signed sbom, vulnerability scan, compliance-ready container +weight: 5 +--- + +In today’s diverse software environments, container images are often designed +for flexibility and broad compatibility. While that makes them ideal for many +use cases, it can also result in images that include more components than needed +for specific workloads. Docker Hardened Images take a minimal-by-design approach +to help reduce image size, limit the attack surface, and streamline security and +compliance workflows. + +Hardened images address this excess by minimizing what's in the container image. Less +software means fewer vulnerabilities, faster deployments, and fewer red +dashboards to chase down every week. + +For platform engineers and security teams, hardened images offer a way out of +the CVE triage cycle, letting you focus on delivering secure, compliant +infrastructure without constant firefighting. + +## What is a hardened image? + +A hardened image is a container image that has been deliberately minimized and +secured to reduce vulnerabilities and meet stringent security and compliance +requirements. Unlike standard images, which might include non-essential components +that increase risk, hardened images are streamlined to include only what’s +needed to run your application securely. + +## Benefits of hardened images + +- Reduced attack surface: By removing non-essential components, hardened images + limit potential entry points for attackers. +- Improved security posture: Regular updates and vulnerability scans help ensure + hardened images remain secure over time. +- Compliance facilitation: Inclusion of signed metadata like SBOMs supports + meeting regulatory and organizational compliance standards. +- Operational efficiency: Smaller image sizes lead to faster pulls, lower runtime overhead, and reduced cloud resource costs. + +## What is a Docker Hardened Image? + +Docker Hardened Images (DHIs) take hardened images even further by combining +minimal, secure design with enterprise-grade support and tooling. Built with +security at the core, these images are continuously maintained, tested, and +validated to meet today’s toughest software supply chain and compliance +standards. + +Docker Hardened Images are secure by default, minimal by design, and maintained +so you don’t have to. + +## How Docker Hardened Images differ from generic hardened images + +- SLSA-compliant builds: Docker Hardened Images are built to meet [SLSA Build + Level 3](../core-concepts/slsa.md), ensuring a tamper-resistant, verifiable, + and auditable build process that protects against supply chain threats. + +- Distroless approach: Unlike traditional base images that bundle an entire OS + with shells, package managers, and debugging tools, [distroless + images](../core-concepts/distroless.md) retain only the minimal OS components + required to run your application. By excluding unnecessary tooling and + libraries, they reduce the attack surface by up to 95% and can improve + performance and reduce image size. + +- Continuous maintenance: All DHIs are continuously monitored and updated to + maintain near-zero known exploitable [CVEs](../core-concepts/cves.md), helping + your teams avoid patch fatigue and surprise alerts.
+ +- Compliance-ready: Each image includes cryptographically signed metadata: + - [SBOMs](../core-concepts/sbom.md) that show what's in the image + - [VEX documents](../core-concepts/vex.md) to identify which vulnerabilities + are actually exploitable + - [Build provenance](../core-concepts/provenance.md) that proves how and where + the image was built + +- Compatibility-focused design: Docker Hardened Images provide a minimal runtime + environment while maintaining compatibility with common Linux distributions. + They remove non-essential components like shells and package managers to + enhance security, yet retain a small base layer built on familiar distribution + standards. Images are typically available with musl libc (Alpine-based) and + glibc (Debian-based), supporting a broad range of application compatibility + needs. + +## Why use Docker Hardened Images? + +Docker Hardened Images (DHIs) are secure by default, minimal by design, and +maintained so you don't have to. They offer: + + +- Images built for peace of mind: Ultra-minimal and distroless, DHIs eliminate up to 95% of the traditional container attack surface. +- No more patch panic: With continuous CVE scanning and SLA-backed remediation, Docker helps you stay ahead of threats. +- Audit-ready images: All DHIs include signed SBOMs, VEX, and provenance that support security and compliance workflows. +- Images that work with your stack: Available in Alpine and Debian flavors, DHIs drop into your existing Dockerfiles and pipelines. +- Images backed by enterprise support: Get peace of mind with Docker's support and rapid response to critical vulnerabilities. diff --git a/content/manuals/dhi/core-concepts/_index.md b/content/manuals/dhi/core-concepts/_index.md new file mode 100644 index 000000000000..fd098701864b --- /dev/null +++ b/content/manuals/dhi/core-concepts/_index.md @@ -0,0 +1,97 @@ +--- +title: Core concepts +description: Learn the core concepts behind Docker Hardened Images, including security metadata, vulnerability management, image structure, and verification. +weight: 30 +params: + grid_concepts_metadata: + - title: Attestations + description: Review the full set of signed attestations included with each Docker Hardened Image, such as SBOMs, VEX, build provenance, and scan results. + icon: assignment + link: /dhi/core-concepts/attestations/ + - title: Software Bill of Materials (SBOMs) + description: Learn what SBOMs are, why they matter, and how Docker Hardened Images include signed SBOMs to support transparency and compliance. + icon: list_alt + link: /dhi/core-concepts/sbom/ + - title: Supply-chain Levels for Software Artifacts (SLSA) + description: Learn how Docker Hardened Images comply with SLSA Build Level 3 and how to verify provenance for secure, tamper-resistant builds. + icon: fact_check + link: /dhi/core-concepts/slsa/ + - title: Image provenance + description: Learn how build provenance metadata helps trace the origin of Docker Hardened Images and support compliance with SLSA. + icon: track_changes + link: /dhi/core-concepts/provenance/ + - title: FIPS + description: Learn how Docker Hardened Images support FIPS 140 by using validated cryptographic modules and providing signed attestations for compliance audits. + icon: verified + link: /dhi/core-concepts/fips/ + + grid_concepts_risk: + - title: Common Vulnerabilities and Exposures (CVEs) + description: Understand what CVEs are, how Docker Hardened Images reduce exposure, and how to scan images for vulnerabilities using popular tools. 
+ icon: error + link: /dhi/core-concepts/cves/ + - title: Vulnerability Exploitability eXchange (VEX) + description: Learn how VEX helps you prioritize real risks by identifying which vulnerabilities in Docker Hardened Images are actually exploitable. + icon: warning + link: /dhi/core-concepts/vex/ + - title: Software Supply Chain Security + description: Learn how Docker Hardened Images help secure every stage of your software supply chain with signed metadata, provenance, and minimal attack surface. + icon: shield + link: /dhi/core-concepts/sscs/ + - title: Secure Software Development Lifecycle (SSDLC) + description: See how Docker Hardened Images support a secure SDLC by integrating with scanning, signing, and debugging tools. + icon: build_circle + link: /dhi/core-concepts/ssdlc/ + + grid_concepts_structure: + - title: Distroless images + description: Learn how Docker Hardened Images use distroless variants to minimize attack surface and remove unnecessary components. + icon: layers_clear + link: /dhi/core-concepts/distroless/ + - title: glibc and musl support in Docker Hardened Images + description: Compare glibc and musl variants of DHIs to choose the right base image for your application’s compatibility, size, and performance needs. + icon: swap_vert + link: /dhi/core-concepts/glibc-musl/ + - title: Image immutability + description: Understand how image digests, read-only containers, and signed metadata ensure Docker Hardened Images are tamper-resistant and immutable. + icon: do_not_disturb_on + link: /dhi/core-concepts/immutability/ + - title: Image hardening + description: Learn how Docker Hardened Images are designed for security, with minimal components, nonroot execution, and secure-by-default configurations. + icon: security + link: /dhi/core-concepts/hardening/ + + grid_concepts_verification: + - title: Digests + description: Learn how to use immutable image digests to guarantee consistency and verify the exact Docker Hardened Image you're running. + icon: fingerprint + link: /dhi/core-concepts/digests/ + - title: Code signing + description: Understand how Docker Hardened Images are cryptographically signed using Cosign to verify authenticity, integrity, and secure provenance. + icon: key + link: /dhi/core-concepts/signatures/ +--- + +Docker Hardened Images (DHIs) are built on a foundation of secure software +supply chain practices. This section explains the core concepts behind that +foundation, from signed attestations and immutable digests to standards like SLSA +and VEX. + +Start here if you want to understand how Docker Hardened Images support compliance, +transparency, and security. + +## Security metadata and attestations + +{{< grid items="grid_concepts_metadata" >}} + +## Vulnerability and risk management + +{{< grid items="grid_concepts_risk" >}} + +## Image structure and behavior + +{{< grid items="grid_concepts_structure" >}} + +## Verification and traceability + +{{< grid items="grid_concepts_verification" >}} \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/attestations.md b/content/manuals/dhi/core-concepts/attestations.md new file mode 100644 index 000000000000..b0411c74df3f --- /dev/null +++ b/content/manuals/dhi/core-concepts/attestations.md @@ -0,0 +1,100 @@ +--- +title: Attestations +description: Review the full set of signed attestations included with each Docker Hardened Image, such as SBOMs, VEX, build provenance, and scan results. 
+keywords: container image attestations, signed sbom, build provenance, slsa compliance, vex document +--- + +Docker Hardened Images (DHIs) include comprehensive, signed security +attestations that verify the image's build process, contents, and security +posture. These attestations are a core part of secure software supply chain +practices and help users validate that an image is trustworthy and +policy-compliant. + +## What is an attestation? + +An attestation is a signed statement that provides verifiable information +about an image, such as how it was built, what's inside it, and what security +checks it has passed. Attestations are typically signed using Sigstore tooling +(such as Cosign), making them tamper-evident and cryptographically verifiable. + +Attestations follow standardized formats (like [in-toto](https://in-toto.io/), +[CycloneDX](https://cyclonedx.org/), and [SLSA](https://slsa.dev/)) and are +attached to the image as OCI-compliant metadata. They can be generated +automatically during image builds or added manually to document extra tests, +scan results, or custom provenance. + +## Why are attestations important? + +Attestations provide critical visibility into the software supply chain by: + +- Documenting *what* went into an image (e.g., SBOMs) +- Verifying *how* it was built (e.g., build provenance) +- Capturing *what security scans* it has passed or failed (e.g., CVE reports, + secrets scans, test results) +- Helping organizations enforce compliance and security policies +- Supporting runtime trust decisions and CI/CD policy gates + +They are essential for meeting industry standards such as SLSA, +and help teams reduce the risk of supply chain attacks by making build and +security data transparent and verifiable. + +## How Docker Hardened Images use attestations + +All DHIs are built using [SLSA Build Level +3](https://slsa.dev/spec/latest/levels) practices, and each image variant is +published with a full set of signed attestations. These attestations allow users +to: + +- Verify that the image was built from trusted sources in a secure environment +- View SBOMs in multiple formats to understand component-level details +- Review scan results to check for vulnerabilities or embedded secrets +- Confirm the build and deployment history of each image + +Attestations are automatically published and associated with each mirrored DHI +in your Docker Hub organization. They can be inspected using tools like [Docker +Scout](../how-to/verify.md) or +[Cosign](https://docs.sigstore.dev/cosign/overview), and are consumable by CI/CD +tooling or security platforms. + +## Available attestations + +The following attestations are available for each image variant. + +| Attestation type | Description | Predicate type URI | +|----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------| +| CycloneDX SBOM | A software bill of materials in [CycloneDX](https://cyclonedx.org/) format, listing components, libraries, and versions. | `https://cyclonedx.org/bom/v1.5` | +| SPDX SBOM | An SBOM in [SPDX](https://spdx.dev/) format, widely adopted in open-source ecosystems. | `https://spdx.dev/Document` | +| Scout SBOM | An SBOM generated and signed by Docker Scout, including additional Docker-specific metadata. 
| `https://scout.docker.com/sbom/v0.1` | +| CVEs (in-toto format) | A list of known vulnerabilities (CVEs) affecting the image's components, based on package and distro scanning. | `https://in-toto.io/attestation/vulns/v0.1` | +| CVEs (Scout format) | A vulnerability report generated by Docker Scout, listing known CVEs and severity data. | `https://scout.docker.com/vulnerabilities/v0.1` | +| VEX | A [Vulnerability Exploitability eXchange (VEX)](https://openvex.dev/) document that identifies vulnerabilities that do not apply to the image and explains why (e.g., not reachable or not present). | `https://openvex.dev/ns/v0.2.0` | +| Secrets Scan | Results of a scan for accidentally included secrets, such as credentials, tokens, or private keys. | `https://scout.docker.com/secrets/v0.1` | +| Virus Scan | Results of antivirus scans performed on the image layers. | `https://scout.docker.com/virus/v0.1` | +| Tests | A record of automated tests run against the image, such as functional checks or validation scripts. | `https://scout.docker.com/tests/v0.1` | +| Scout Health Score | A signed attestation from Docker Scout that summarizes the overall security and quality posture of the image. | `https://scout.docker.com/health/v0.1` | +| Build Provenance (Scout) | Provenance metadata generated by Docker Scout, including the source Git commit, build parameters, and environment details. | `https://scout.docker.com/provenance/v0.1` | +| SLSA Provenance | A standard [SLSA](https://slsa.dev/) provenance statement describing how the image was built, including build tool, parameters, and source. | `https://slsa.dev/provenance/v0.2` | +| SLSA Verification Summary | A summary attestation indicating the image's compliance with SLSA requirements. | `https://slsa.dev/verification_summary/v1` | + +## View and verify attestations + +To view and verify attestations for an image, see [Verify a Docker Hardened +Image](../how-to/verify.md). + +## Add your own attestations + +In addition to the comprehensive attestations provided by Docker Hardened +Images, you can add your own signed attestations when building derivative +images. This is especially useful if you’re building new applications on top of +a DHI and want to maintain transparency, traceability, and trust in your +software supply chain. + +By attaching attestations such as SBOMs, build provenance, or custom metadata, +you can meet compliance requirements, pass security audits, and support policy +evaluation tools like Docker Scout. + +These attestations can then be verified downstream using tools +like Cosign or Docker Scout. + +To learn how to attach custom attestations during the build process, see [Build +attestations](/manuals/build/metadata/attestations.md). diff --git a/content/manuals/dhi/core-concepts/cves.md b/content/manuals/dhi/core-concepts/cves.md new file mode 100644 index 000000000000..cb707c08f2d9 --- /dev/null +++ b/content/manuals/dhi/core-concepts/cves.md @@ -0,0 +1,179 @@ +--- +title: Common Vulnerabilities and Exposures (CVEs) +linktitle: CVEs +description: Understand what CVEs are, how Docker Hardened Images reduce exposure, and how to scan images for vulnerabilities using popular tools. +keywords: docker cve scan, grype vulnerability scanner, trivy image scan, vex attestation, secure container images +--- + +## What are CVEs? + +CVEs are publicly disclosed cybersecurity flaws in software or hardware. 
Each +CVE is assigned a unique identifier (for example, CVE-2024-12345) and includes a +standardized description, allowing organizations to track and address +vulnerabilities consistently. + +In the context of Docker, CVEs often pertain to issues within base images or +application dependencies. These vulnerabilities can range from minor bugs to +critical security risks, such as remote code execution or privilege escalation. + +## Why are CVEs important? + +Regularly scanning and updating Docker images to mitigate CVEs is crucial for +maintaining a secure and compliant environment. Ignoring CVEs can lead to severe +security breaches, including: + +- Unauthorized access: Exploits can grant attackers unauthorized access to + systems. +- Data breaches: Sensitive information can be exposed or stolen. +- Service disruptions: Vulnerabilities can be leveraged to disrupt services or + cause downtime. +- Compliance violations: Failure to address known vulnerabilities can lead to + non-compliance with industry regulations and standards. + +## How Docker Hardened Images help mitigate CVEs + +Docker Hardened Images (DHIs) are crafted to minimize the risk of CVEs from the +outset. By adopting a security-first approach, DHIs offer several advantages in +CVE mitigation: + +- Reduced attack surface: DHIs are built using a distroless approach, stripping + away unnecessary components and packages. This reduction in image size, up to + 95% smaller than traditional images, limits the number of potential + vulnerabilities, making it harder for attackers to exploit unneeded software. + +- Faster CVE remediation: Maintained by Docker with an enterprise-grade SLA, + DHIs are continuously updated to address known vulnerabilities. Critical and + high-severity CVEs are patched quickly, ensuring that your containers remain + secure without manual intervention. + +- Proactive vulnerability management: By utilizing DHIs, organizations can + proactively manage vulnerabilities. The images come with CVE and Vulnerability + Exploitability eXchange (VEX) feeds, enabling teams to stay informed about potential threats + and take necessary actions promptly. + +## Scan images for CVEs + +Regularly scanning Docker images for CVEs is essential for maintaining a secure +containerized environment. While Docker Scout is integrated into Docker Desktop +and the Docker CLI, tools like Grype and Trivy offer alternative scanning +capabilities. The following are instructions for using each tool to scan Docker +images for CVEs. + +### Docker Scout + +Docker Scout is integrated into Docker Desktop and the Docker CLI. It provides +vulnerability insights, CVE summaries, and direct links to remediation guidance. + +#### Scan a DHI using Docker Scout + +To scan a Docker Hardened Image using Docker Scout, run the following +command: + +```console +$ docker scout cves <namespace>/dhi-<image>:<tag> +``` + +Example output: + +```plaintext + ✓ SBOM obtained from attestation, 101 packages found + ✓ Provenance obtained from attestation + ✓ VEX statements obtained from attestation + ✓ No vulnerable package detected + ... +``` + +For more detailed filtering and JSON output, see [Docker Scout CLI reference](../../../reference/cli/docker/scout/_index.md). + +### Grype + +[Grype](https://github.com/anchore/grype) is an open-source scanner that checks +container images against vulnerability databases like the NVD and distro +advisories.
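If you don't already have Grype installed, the project's install script is one option. This is a sketch based on the Grype README, so check the upstream documentation for current instructions:

```console
$ curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
```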
+ +#### Scan a DHI using Grype + +After installing Grype, you can scan a Docker Hardened Image by pulling +the image and running the scan command: + +```console +$ docker pull <namespace>/dhi-<image>:<tag> +$ grype <namespace>/dhi-<image>:<tag> +``` + +Example output: + +```plaintext +NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY EPSS% RISK +libperl5.36 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +perl 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +perl-base 5.36.0-7+deb12u2 (won't fix) deb CVE-2023-31484 High 79.45 1.1 +... +``` + +### Trivy + +[Trivy](https://github.com/aquasecurity/trivy) is an open-source vulnerability +scanner for containers and other artifacts. It detects vulnerabilities in OS +packages and application dependencies. + +#### Scan a DHI using Trivy + +After installing Trivy, you can scan a Docker Hardened Image by pulling +the image and running the scan command: + +```console +$ docker pull <namespace>/dhi-<image>:<tag> +$ trivy image <namespace>/dhi-<image>:<tag> +``` + +Example output: + +```plaintext +Report Summary + +┌──────────────────────────────────────────────────────────────────────────────┬────────────┬─────────────────┬─────────┐ +│ Target │ Type │ Vulnerabilities │ Secrets │ +├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤ +│ <namespace>/dhi-<image>:<tag> (debian 12.11) │ debian │ 66 │ - │ +├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤ +│ opt/python-3.13.4/lib/python3.13/site-packages/pip-25.1.1.dist-info/METADATA │ python-pkg │ 0 │ - │ +└──────────────────────────────────────────────────────────────────────────────┴────────────┴─────────────────┴─────────┘ +``` + +## Use VEX to filter known non-exploitable CVEs + +Docker Hardened Images include signed [VEX (Vulnerability Exploitability +eXchange)](./vex.md) attestations that identify vulnerabilities not relevant to the image’s +runtime behavior. + +When using Docker Scout, these VEX statements are automatically applied and no +manual configuration is needed. + +To manually retrieve the VEX attestation for tools that support it: + +```console +$ docker scout attest get \ + --predicate-type https://openvex.dev/ns/v0.2.0 \ + --predicate \ + <namespace>/dhi-<image>:<tag> --platform <platform> > vex.json +``` + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://openvex.dev/ns/v0.2.0 \ + --predicate \ + docs/dhi-python:3.13 --platform linux/amd64 > vex.json +``` + +This creates a `vex.json` file containing the VEX statements for the specified +image. You can then use this file with tools that support VEX to filter out known non-exploitable CVEs. + +For example, with Grype and Trivy, you can use the `--vex` flag to apply the VEX +statements during the scan: + +```console +$ grype <namespace>/dhi-<image>:<tag> --vex vex.json +``` \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/digests.md b/content/manuals/dhi/core-concepts/digests.md new file mode 100644 index 000000000000..84a9f953a32b --- /dev/null +++ b/content/manuals/dhi/core-concepts/digests.md @@ -0,0 +1,126 @@ +--- +title: Digests +description: Learn how to use immutable image digests to guarantee consistency and verify the exact Docker Hardened Image you're running. +keywords: docker image digest, pull image by digest, immutable container image, secure container reference, multi-platform manifest +--- + +## What are Docker image digests?
+ +A Docker image digest is a unique, cryptographic identifier (SHA-256 hash) +representing the content of a Docker image. Unlike tags, which can be reused or +changed, a digest is immutable and ensures that the exact same image is pulled +every time. This guarantees consistency across different environments and +deployments. + +For example, the digest for the `nginx:latest` image might look like: + +```text +sha256:94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a +``` + +This digest uniquely identifies the specific version of the `nginx:latest` image, +ensuring that any changes to the image content result in a different digest. + +## Why are digests important? + +Using digests instead of tags offers several advantages: + +- Immutability: Once an image is built and its digest is generated, the content + tied to that digest cannot change. This means that if you pull an image using + its digest, you can be confident that you are retrieving exactly the same + image that was originally built. + +- Security: Digests help prevent supply chain attacks by ensuring that the image + content has not been tampered with. Even a small change in the image content + will result in a completely different digest. + +- Consistency: Using digests ensures that the same image is used across + different environments, reducing the risk of discrepancies between + development, staging, and production environments. + +## Docker Hardened Image digests + +By using digests to reference DHIs, you can ensure that your applications are +always using the exact same secure image version, enhancing security and +compliance. + +## View an image digest + +### Use the Docker CLI + +To view the digest of a Docker image, you can use the following command. Replace +`<image>:<tag>` with the image name and tag. + +```console +$ docker buildx imagetools inspect <image>:<tag> +``` + +### Use the Docker Hub UI + +1. Go to [Docker Hub](https://hub.docker.com/) and sign in. +2. Navigate to your organization's namespace and open the mirrored DHI repository. +3. Select the **Tags** tab to view image variants. +4. Each tag in the list includes a **Digest** field showing the image's SHA-256 value. + +## Pull an image by digest + +Pulling an image by digest ensures that you are pulling the exact image version +identified by the specified digest. + +To pull a Docker image using its digest, use the following command. Replace +`<image>` with the image name and `<digest>` with the image digest. + +```console +$ docker pull <image>@sha256:<digest> +``` + +For example, to pull a `docs/dhi-python:3.13` image using its digest of +`94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a`, you would +run: + +```console +$ docker pull docs/dhi-python@sha256:94a00394bc5a8ef503fb59db0a7d0ae9e1110866e8aee8ba40cd864cea69ea1a +``` + +## Multi-platform images and manifests + +Docker Hardened Images are published as multi-platform images, which means +a single image tag (like `docs/dhi-python:3.13`) can support multiple operating +systems and CPU architectures, such as `linux/amd64`, `linux/arm64`, and more. + +Instead of pointing to a single image, a multi-platform tag points to a manifest +list (also called an index), which is a higher-level object that references +multiple image digests, one for each supported platform.
+ +When you inspect a multi-platform image using `docker buildx imagetools inspect`, you'll see something like this: + +```text +Name: docs/dhi-python:3.13 +MediaType: application/vnd.docker.distribution.manifest.list.v2+json +Digest: sha256:6e05...d231 + +Manifests: + Name: docs/dhi-python:3.13@sha256:94a0...ea1a + Platform: linux/amd64 + ... + + Name: docs/dhi-python:3.13@sha256:7f1d...bc43 + Platform: linux/arm64 + ... +``` + +- The manifest list digest (`sha256:6e05...d231`) identifies the overall + multi-platform image. +- Each platform-specific image has its own digest (e.g., `sha256:94a0...ea1a` + for `linux/amd64`). + +### Why this matters + +- Reproducibility: If you're building or running containers on different + architectures, using a tag alone will resolve to the appropriate image digest + for your platform. +- Verification: You can pull and verify a specific image digest for your + platform to ensure you're using the exact image version, not just the manifest + list. +- Policy enforcement: When enforcing digest-based policies with Docker Scout, + each platform variant is evaluated individually using its digest. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/distroless.md b/content/manuals/dhi/core-concepts/distroless.md new file mode 100644 index 000000000000..618a02b4c245 --- /dev/null +++ b/content/manuals/dhi/core-concepts/distroless.md @@ -0,0 +1,68 @@ +--- +title: Minimal or distroless images +linktitle: Distroless images +description: Learn how Docker Hardened Images use distroless variants to minimize attack surface and remove unnecessary components. +keywords: distroless container image, minimal docker image, secure base image, no shell container, reduced attack surface +--- + + +Minimal images, sometimes called distroless images, are container images +stripped of unnecessary components such as package managers, shells, or even the +underlying operating system distribution. Docker Hardened Images (DHI) embrace +this minimal approach to reduce vulnerabilities and enforce secure software +delivery. + +### What are minimal or distroless images? + +Traditional container images include a full OS, often more than what is needed +to run an application. In contrast, minimal or distroless images include only: + +- The application binary +- Its runtime dependencies (e.g., libc, Java, Python) +- Any explicitly required configuration or metadata + +They typically exclude: + +- OS tools (e.g., `ls`, `ps`, `cat`) +- Shells (e.g., `sh`, `bash`) +- Package managers (e.g., `apt`, `apk`) +- Debugging utilities (e.g., `curl`, `wget`, `strace`) + +Docker Hardened Images are based on this model, ensuring a smaller and more +secure runtime surface. + +### What you gain + +| Benefit | Description | +|------------------------|-------------------------------------------------------------------------------| +| Smaller attack surface | Fewer components mean fewer vulnerabilities and less exposure to CVEs | +| Faster startup | Smaller image sizes result in faster pull and start times | +| Improved security | Lack of shell and package manager limits what attackers can do if compromised | +| Better compliance | Easier to audit and verify, especially with SBOMs and attestations | + +### Addressing common tradeoffs + +Minimal and distroless images offer strong security benefits, but they can +change how you work with containers. Docker Hardened Images are designed to +maintain productivity while enhancing security. 
+ +| Concern | How Docker Hardened Images help | +|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Debuggability | Hardened images exclude shells and CLI tools by default. Use [Docker Debug](../../../reference/cli/docker/debug.md) to temporarily attach a debug sidecar for troubleshooting without modifying the original container. | +| Familiarity | DHI supports multiple base images, including Alpine and Debian variants, so you can choose a familiar environment while still benefiting from hardening practices. | +| Flexibility | Runtime immutability helps secure your containers. Use multi-stage builds and CI/CD to control changes, and optionally use dev-focused base images during development. | + +By balancing minimalism with practical tooling, Docker Hardened Images support +modern development workflows without compromising on security or reliability. + +### Best practices for using minimal images + +- Use multi-stage builds to separate build-time and runtime environments +- Validate image behavior using CI pipelines, not interactive inspection +- Include runtime-specific dependencies explicitly in your Dockerfile +- Use Docker Scout to continuously monitor for CVEs, even in minimal images + +By adopting minimal or distroless images through Docker Hardened Images, you +gain a more secure, predictable, and production-ready container environment +that's designed for automation, clarity, and reduced risk. + diff --git a/content/manuals/dhi/core-concepts/fips.md b/content/manuals/dhi/core-concepts/fips.md new file mode 100644 index 000000000000..4b67a57c545e --- /dev/null +++ b/content/manuals/dhi/core-concepts/fips.md @@ -0,0 +1,108 @@ +--- +title: FIPS +description: Learn how Docker Hardened Images support FIPS 140 through validated cryptographic modules to help organizations meet compliance requirements. +keywords: docker fips, fips 140 images, fips docker images, docker compliance, secure container images +--- + +## What is FIPS 140? + +[FIPS 140](https://csrc.nist.gov/publications/detail/fips/140/3/final) is a U.S. +government standard that defines security requirements for cryptographic modules +that protect sensitive information. It is widely used in regulated environments +such as government, healthcare, and financial services. + +FIPS certification is managed by the [NIST Cryptographic Module Validation +Program +(CMVP)](https://csrc.nist.gov/projects/cryptographic-module-validation-program), +which ensures cryptographic modules meet rigorous security standards. + +## Why FIPS compliance matters + +FIPS 140 compliance is required or strongly recommended in many regulated +environments where sensitive data must be protected, such as government, +healthcare, finance, and defense. These standards ensure that cryptographic +operations are performed using vetted, trusted algorithms implemented in secure +modules. + +Using software components that rely on validated cryptographic modules can help organizations: + +- Satisfy federal and industry mandates, such as FedRAMP, which require or + strongly recommend FIPS 140-validated cryptography. +- Demonstrate audit readiness, with verifiable evidence of secure, + standards-based cryptographic practices. +- Reduce security risk, by blocking unapproved or unsafe algorithms (e.g., MD5) + and ensuring consistent behavior across environments. 
+ +## How Docker Hardened Images support FIPS compliance + +Docker Hardened Images (DHIs) include variants that use cryptographic modules +validated under FIPS 140. These images are intended to help organizations meet +compliance requirements by incorporating components that meet the standard. + +- FIPS image variants use cryptographic modules that are already validated under + FIPS 140. +- These variants are built and maintained by Docker to support environments with + regulatory or compliance needs. +- Docker provides signed test attestations that document the use of validated + cryptographic modules. These attestations can support internal audits and + compliance reporting. + +> [!NOTE] +> +> Using a FIPS image variant helps meet compliance requirements but does not +> make an application or system fully compliant. Compliance depends on how the +> image is integrated and used within the broader system. + +## Identify images that support FIPS + +Docker Hardened Images that support FIPS are marked as **FIPS** compliant +in the Docker Hardened Images catalog. + +To find DHI repositories with FIPS image variants, [explore images](../how-to/explore.md) and: + +- Use the **FIPS** filter on the catalog page +- Look for the **FIPS** compliant label on individual image listings + +These indicators help you quickly locate repositories that support FIPS-based +compliance needs. Image variants that include FIPS support have a tag +ending with `-fips`, such as `3.13-fips`. + +## Validate FIPS-related tests using attestations + +Docker Hardened Images include a signed [test +attestation](../core-concepts/attestations.md) that documents the results of +automated image validation. For FIPS variants, this includes test cases that +verify whether the image uses FIPS-validated cryptographic modules. + +You can retrieve and inspect this attestation using the Docker Scout CLI: + +```console +$ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --predicate \ + <namespace>/dhi-<image>:<tag> --platform <platform> +``` + +For example: + +```console +$ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --predicate \ + docs/dhi-python:3.13-fips --platform linux/amd64 +``` + +The output is a structured JSON report. Individual test outputs are +base64-encoded under fields like `stdout`. You can decode them to review the raw +test output. + +To decode and view test results: + +```console +$ docker scout attest get \ + --predicate-type https://scout.docker.com/tests/v0.1 \ + --predicate \ + docs/dhi-python:3.13-fips --platform linux/amd64 \ + | jq -r '.results.tests[].extra.stdout' \ + | base64 -d +``` \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/glibc-musl.md b/content/manuals/dhi/core-concepts/glibc-musl.md new file mode 100644 index 000000000000..1ef7cdfa45f4 --- /dev/null +++ b/content/manuals/dhi/core-concepts/glibc-musl.md @@ -0,0 +1,58 @@ +--- +title: glibc and musl support in Docker Hardened Images +linktitle: glibc and musl +description: Compare glibc and musl variants of DHIs to choose the right base image for your application’s compatibility, size, and performance needs. +keywords: glibc vs musl, alpine musl image, debian glibc container, docker hardened images compatibility, c library in containers +--- + +Docker Hardened Images (DHI) are built to prioritize security without +sacrificing compatibility with the broader open source and enterprise software +ecosystem.
A key aspect of this compatibility is support for common Linux
standard libraries: `glibc` and `musl`.

## What are glibc and musl?

When you run Linux-based containers, the image's C library plays a key role in
how applications interact with the operating system. Most modern Linux
distributions rely on one of the following standard C libraries:

- `glibc` (GNU C Library): The standard C library on mainstream distributions
  like Debian, Ubuntu, and Red Hat Enterprise Linux. It is widely supported and
  typically considered the most compatible option across languages, frameworks,
  and enterprise software.

- `musl`: A lightweight alternative to `glibc`, commonly used in minimal
  distributions like Alpine Linux. While it offers smaller image sizes and
  performance benefits, `musl` is not always fully compatible with software that
  expects `glibc`.

## DHI compatibility

DHI images are available in both `glibc`-based (for example, Debian) and `musl`-based
(for example, Alpine) variants. For enterprise applications and language runtimes where
compatibility is critical, we recommend using DHI images based on `glibc`.

## What to choose: glibc or musl?

Docker Hardened Images are available in both glibc-based (Debian) and musl-based
(Alpine) variants, so you can choose the best fit for your workload.

Choose Debian-based (`glibc`) images if:

- You need broad compatibility with enterprise workloads, language runtimes, or
  proprietary software.
- You're using ecosystems like .NET, Java, or Python with native extensions that
  depend on `glibc`.
- You want to minimize the risk of runtime errors due to library
  incompatibilities.

Choose Alpine-based (`musl`) images if:

- You want a minimal footprint with smaller image sizes and reduced surface
  area.
- You're building a custom or tightly controlled application stack where
  dependencies are known and tested.
- You prioritize startup speed and lean deployments over maximum compatibility.

If you're unsure, start with a Debian-based image to ensure compatibility, and
evaluate Alpine once you're confident in your application's dependencies. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/hardening.md b/content/manuals/dhi/core-concepts/hardening.md new file mode 100644 index 000000000000..185226cb3473 --- /dev/null +++ b/content/manuals/dhi/core-concepts/hardening.md @@ -0,0 +1,74 @@ +--- +title: Base image hardening +linktitle: Hardening +description: Learn how Docker Hardened Images are designed for security, with minimal components, nonroot execution, and secure-by-default configurations. +keywords: hardened base image, minimal container image, non-root containers, secure container configuration, remove package manager +---

## What is base image hardening?

Base image hardening is the process of securing the foundational layers of a
container image by minimizing what they include and configuring them with
security-first defaults. A hardened base image removes unnecessary components,
like shells, compilers, and package managers, which limits the available attack
surface and makes it more difficult for an attacker to gain control or escalate
privileges inside the container.

Hardening also involves applying best practices like running as a non-root user,
reducing writable surfaces, and ensuring consistency through immutability.

## Why is it important?

Most containers inherit their security posture from the base image they use.
If
the base image includes unnecessary tools or runs with elevated privileges,
every container built on top of it is exposed to those risks.

Hardening the base image:

- Reduces the attack surface by removing tools and libraries that could be exploited
- Enforces least privilege by dropping root access and restricting what the container can do
- Improves reliability and consistency by avoiding runtime changes and drift
- Aligns with secure software supply chain practices and helps meet compliance standards

Using hardened base images is a critical first step in securing the software you
build and run in containers.

## What's removed and why

Hardened images typically exclude common components that are risky or unnecessary in secure production environments:

| Removed component | Reason |
|--------------------------------------------------|----------------------------------------------------------------------------------|
| Shells (for example, `sh`, `bash`) | Prevents users or attackers from executing arbitrary commands inside containers |
| Package managers (for example, `apt`, `apk`) | Disables the ability to install software post-build, reducing drift and exposure |
| Compilers and interpreters | Avoids introducing tools that could be used to run or inject malicious code |
| Debugging tools (for example, `strace`, `curl`, `wget`) | Reduces risk of exploitation or information leakage |
| Unused libraries or locales | Shrinks image size and minimizes attack vectors |

## How Docker Hardened Images apply base image hardening

Docker Hardened Images (DHIs) apply base image hardening principles by design.
Each image is constructed to include only what is necessary for its specific
purpose, whether that’s building applications (with `-dev` or `-sdk` tags) or
running them in production.

### Docker Hardened Image traits

Docker Hardened Images are built to be:

- Minimal: Only essential libraries and binaries are included
- Immutable: Images are fixed at build time, with no runtime installations
- Non-root by default: Containers run as an unprivileged user unless configured otherwise
- Purpose-scoped: Different tags are available for development (`-dev`), SDK-based builds (`-sdk`), and production runtime

These characteristics help enforce consistent, secure behavior across development, testing, and production environments.

### Docker Hardened Image compatibility considerations

Because Docker Hardened Images strip out many common tools, they might not work out of the box for all use cases. You might need to:

- Use multi-stage builds to compile code or install dependencies in a `-dev` image and copy the output into a hardened runtime image, as shown in the sketch after this section
- Replace shell scripts with equivalent entrypoint binaries or explicitly include a shell if needed
- Use [Docker Debug](../../../reference/cli/docker/debug.md) to temporarily inspect or troubleshoot containers without altering the base image

These trade-offs are intentional and help support best practices for building secure, reproducible, and production-ready containers.
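To illustrate the multi-stage pattern from the list above, the following Dockerfile is a minimal sketch. The namespace, the `3.13-dev` tag, and the `app.py` entrypoint are placeholder assumptions, not a prescribed DHI layout:

```dockerfile
# Build stage: a -dev variant that still includes a package manager and build tools
FROM <your-namespace>/dhi-python:3.13-dev AS build
WORKDIR /app
COPY requirements.txt .
# Install dependencies into a virtual environment so the runtime stage can copy one directory
RUN python -m venv /venv && /venv/bin/pip install --no-cache-dir -r requirements.txt
COPY . .

# Runtime stage: the hardened variant has no shell or package manager
FROM <your-namespace>/dhi-python:3.13
COPY --from=build /venv /venv
COPY --from=build /app /app
# Use the exec form because the runtime image has no shell to interpret commands
ENTRYPOINT ["/venv/bin/python", "/app/app.py"]
```

Because the final stage starts from the minimal runtime image, none of the build tooling ships to production, which is exactly the trade-off described above.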
\ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/immutability.md b/content/manuals/dhi/core-concepts/immutability.md new file mode 100644 index 000000000000..c6cfb1144684 --- /dev/null +++ b/content/manuals/dhi/core-concepts/immutability.md @@ -0,0 +1,57 @@ +--- +title: Immutable infrastructure +linktitle: Immutability +description: Understand how image digests, read-only containers, and signed metadata ensure Docker Hardened Images are tamper-resistant and immutable. +keywords: immutable container image, read-only docker image, configuration drift prevention, secure redeployment, image digest verification +--- + +Immutable infrastructure is a security and operations model where components +such as servers, containers, and images are never modified after deployment. +Instead of patching or reconfiguring live systems, you replace them entirely +with new versions. + +When using Docker Hardened Images, immutability is a best practice that +reinforces the security posture of your software supply chain. + +## Why immutability matters + +Mutable systems are harder to secure and audit. Live patching or manual updates +introduce risks such as: + +- Configuration drift +- Untracked changes +- Inconsistent environments +- Increased attack surface + +Immutable infrastructure solves this by making changes only through controlled, +repeatable builds and deployments. + +## How Docker Hardened Images support immutability + +Docker Hardened Images are built to be minimal, locked-down, and +non-interactive, which discourages in-place modification. For example: + +- Many DHI images exclude shells, package managers, and debugging tools +- DHI images are designed to be scanned and signed before deployment +- DHI users are encouraged to rebuild and redeploy images rather than patch running containers + +This design aligns with immutable practices and ensures that: + +- Updates go through the CI/CD pipeline +- All changes are versioned and auditable +- Systems can be rolled back or reproduced consistently + +## Immutable patterns in practice + +Some common patterns that leverage immutability include: + +- Container replacement: Instead of logging into a container to fix a bug or + apply a patch, rebuild the image and redeploy it. +- Infrastructure as Code (IaC): Define your infrastructure and image + configurations in version-controlled files. +- Blue/Green or Canary deployments: Roll out new images alongside old ones and + gradually shift traffic to the new version. + +By combining immutable infrastructure principles with hardened images, you +create a predictable and secure deployment workflow that resists tampering and +minimizes long-term risk. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/provenance.md b/content/manuals/dhi/core-concepts/provenance.md new file mode 100644 index 000000000000..5c8adcb425e4 --- /dev/null +++ b/content/manuals/dhi/core-concepts/provenance.md @@ -0,0 +1,71 @@ +--- +title: Image provenance +description: Learn how build provenance metadata helps trace the origin of Docker Hardened Images and support compliance with SLSA. +keywords: image provenance, container build traceability, slsa compliance, signed container image, software supply chain trust +--- + +## What is image provenance? + +Image provenance refers to metadata that traces the origin, authorship, and +integrity of a container image. It answers critical questions such as: + +- Where did this image come from? +- Who built it? +- Has it been tampered with? 
+ +Provenance establishes a chain of custody, helping you verify that the image +you're using is the result of a trusted and verifiable build process. + +## Why image provenance matters + +Provenance is foundational to securing your software supply chain. Without it, you risk: + +- Running unverified or malicious images +- Failing to meet internal or regulatory compliance requirements +- Losing visibility into the components and workflows that produce your containers + +With reliable provenance, you gain: + +- Trust: Know that your images are authentic and unchanged. +- Traceability: Understand the full build process and source inputs. +- Auditability: Provide verifiable evidence of compliance and build integrity. + +Provenance also supports automated policy enforcement and is a key requirement +for frameworks like SLSA (Supply-chain Levels for Software Artifacts). + +## How Docker Hardened Images support provenance + +Docker Hardened Images (DHIs) are designed with built-in provenance to help you +adopt secure-by-default practices and meet supply chain security standards. + +### Attestations + +DHIs include [attestations](./attestations.md)—machine-readable metadata that +describe how, when, and where the image was built. These are generated using +industry standards such as [in-toto](https://in-toto.io/) and align with [SLSA +provenance](https://slsa.dev/spec/v1.0/provenance/). + +Attestations allow you to: + +- Validate that builds followed the expected steps +- Confirm that inputs and environments meet policy +- Trace the build process across systems and stages + +### Code signing + +Each Docker Hardened Image is cryptographically [signed](./signatures.md) and +stored in the registry alongside its digest. These signatures are verifiable +proofs of authenticity and are compatible with tools like `cosign`, Docker +Scout, and Kubernetes admission controllers. + +With image signatures, you can: + +- Confirm that the image was published by Docker +- Detect if an image has been modified or republished +- Enforce signature validation in CI/CD or production deployments + +## Additional resources + +- [Provenance attestations](/build/metadata/attestations/slsa-provenance/) +- [Image signatures](./signatures.md) +- [Attestations overview](./attestations.md) \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/sbom.md b/content/manuals/dhi/core-concepts/sbom.md new file mode 100644 index 000000000000..18c6dd876570 --- /dev/null +++ b/content/manuals/dhi/core-concepts/sbom.md @@ -0,0 +1,92 @@ +--- +title: Software Bill of Materials (SBOMs) +linktitle: SBOMs +description: Learn what SBOMs are, why they matter, and how Docker Hardened Images include signed SBOMs to support transparency and compliance. +keywords: sbom docker image, software bill of materials, signed sbom, container sbom verification, sbom compliance +--- + +## What is an SBOM? + +An SBOM is a detailed inventory that lists all components, libraries, and +dependencies used in building a software application. It provides transparency +into the software supply chain by documenting each component's version, origin, +and relationship to other components. Think of it as a "recipe" for your +software, detailing every ingredient and how they come together. + +Metadata included in an SBOM for describing software artifacts may include: + +- Name of the artifact +- Version +- License type +- Authors +- Unique package identifier + +## Why are SBOMs important? 
+
In today's software landscape, applications often comprise numerous components
from various sources, including open-source libraries, third-party services, and
proprietary code. This complexity can obscure visibility into potential
vulnerabilities and complicate compliance efforts. SBOMs address these
challenges by providing a detailed inventory of all components within an
application.

The significance of SBOMs is underscored by several key factors:

- Enhanced transparency: SBOMs offer a comprehensive view of all components that
  constitute an application, enabling organizations to identify and assess risks
  associated with third-party libraries and dependencies.

- Proactive vulnerability management: By maintaining an up-to-date SBOM,
  organizations can swiftly identify and address vulnerabilities in software
  components, reducing the window of exposure to potential exploits.

- Regulatory compliance: Many regulations and industry standards now require
  organizations to maintain control over the software components they use. An
  SBOM facilitates compliance by providing a clear and accessible record.

- Improved incident response: In the event of a security breach, an SBOM
  enables organizations to quickly identify affected components and take
  appropriate action, minimizing potential damage.

## Docker Hardened Image SBOMs

Docker Hardened Images come with built-in SBOMs, ensuring that every component
in the image is documented and verifiable. These SBOMs are cryptographically
signed, providing a tamper-evident record of the image's contents. This
integration simplifies audits and enhances trust in the software supply chain.

## View SBOMs in Docker Hardened Images

To view the SBOM of a Docker Hardened Image, you can use the `docker scout sbom`
command. Replace `<image>:<tag>` with the image name and tag.

```console
$ docker scout sbom <image>:<tag>
```

## Verify the SBOM of a Docker Hardened Image

Since Docker Hardened Images come with signed SBOMs, you can use Docker Scout to
verify the authenticity and integrity of the SBOM attached to the image. This
ensures that the SBOM has not been tampered with and that the image's contents
are trustworthy.

To verify the SBOM of a Docker Hardened Image using Docker Scout, use the following command:

```console
$ docker scout attest get <image>:<tag> \
  --predicate-type https://scout.docker.com/sbom/v0.1 --verify --platform <platform>
```

For example, to verify the SBOM attestation for the `docs/dhi-node:20.19-debian12-fips-20250701182639` image:

```console
$ docker scout attest get docs/dhi-node:20.19-debian12-fips-20250701182639 \
  --predicate-type https://scout.docker.com/sbom/v0.1 --verify --platform linux/amd64
```

## Resources

For more details about SBOM attestations and Docker Build, see [SBOM
attestations](/build/metadata/attestations/sbom/). \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/signatures.md b/content/manuals/dhi/core-concepts/signatures.md new file mode 100644 index 000000000000..4e2324ae4f7b --- /dev/null +++ b/content/manuals/dhi/core-concepts/signatures.md @@ -0,0 +1,95 @@ +--- +title: Code signing +description: Understand how Docker Hardened Images are cryptographically signed using Cosign to verify authenticity, integrity, and secure provenance. +keywords: container image signing, cosign docker image, verify image signature, signed container image, sigstore cosign +---

## What is code signing?
+
Code signing is the process of applying a cryptographic signature to software
artifacts, such as Docker images, to verify their integrity and authenticity. By
signing an image, you ensure that it has not been altered since it was signed
and that it originates from a trusted source.

In the context of Docker Hardened Images (DHIs), code signing is achieved using
[Cosign](https://docs.sigstore.dev/), a tool developed by the Sigstore project.
Cosign enables secure and verifiable signing of container images, enhancing
trust and security in the software supply chain.

## Why is code signing important?

Code signing plays a crucial role in modern software development and
cybersecurity:

- Authenticity: Verifies that the image was created by a trusted source.
- Integrity: Ensures that the image has not been tampered with since it was
  signed.
- Compliance: Helps meet regulatory and organizational security requirements.

## Docker Hardened Image code signing

Each DHI is cryptographically signed using Cosign, ensuring that the images have
not been tampered with and originate from a trusted source.

## Why sign your own images?

Docker Hardened Images are signed by Docker to prove their origin and integrity,
but if you're building application images that extend or use DHIs as a base, you
should sign your own images as well.

By signing your own images, you can:

- Prove the image was built by your team or pipeline
- Ensure your build hasn't been tampered with after it's pushed
- Support software supply chain frameworks like SLSA
- Enable image verification in deployment workflows

This is especially important in CI/CD environments where you build and push
images frequently, or in any scenario where image provenance must be auditable.

## How to view and use code signatures

### View signatures

You can verify that a Docker Hardened Image is signed and trusted using either Docker Scout or Cosign.

To list all attestations, including signature metadata, attached to an image, use the following command:

```console
$ docker scout attest list <image>:<tag> --platform <platform>
```

To verify a specific signed attestation (for example, SBOM, VEX, or provenance):

```console
$ docker scout attest get \
  --predicate-type <predicate-type> \
  --verify \
  <image>:<tag> --platform <platform>
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://openvex.dev/ns/v0.2.0 \
  --verify \
  docs/dhi-python:3.13 --platform linux/amd64
```

If the signature is valid, Docker Scout confirms it and displays the signature payload, as well as the equivalent Cosign command to verify the image.

### Sign images

To sign a Docker image, use [Cosign](https://docs.sigstore.dev/). Replace
`<image>:<tag>` with the image name and tag.

```console
$ cosign sign <image>:<tag>
```

This command prompts you to authenticate through an OIDC provider (such as
GitHub, Google, or Microsoft). Upon successful authentication, Cosign generates
a short-lived certificate and signs the image. The signature is stored in a
transparency log and associated with the image in the registry.
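You can later verify an image signed this way with `cosign verify`, passing the identity and OIDC issuer recorded in the short-lived certificate. The following is a sketch that assumes keyless signing with a GitHub account; substitute your own identity, issuer, and image reference:

```console
$ cosign verify \
  --certificate-identity you@example.com \
  --certificate-oidc-issuer https://github.com/login/oauth \
  <image>:<tag>
```

If verification succeeds, Cosign prints the matching signature payloads and the claims it checked.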
\ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/slsa.md b/content/manuals/dhi/core-concepts/slsa.md new file mode 100644 index 000000000000..7178a368a043 --- /dev/null +++ b/content/manuals/dhi/core-concepts/slsa.md @@ -0,0 +1,104 @@ +--- +title: Supply-chain Levels for Software Artifacts (SLSA) +linktitle: SLSA +description: Learn how Docker Hardened Images comply with SLSA Build Level 3 and how to verify provenance for secure, tamper-resistant builds. +keywords: slsa docker compliance, slsa build level 3, supply chain security, verified build provenance, secure container build +--- + +## What is SLSA? + +Supply-chain Levels for Software Artifacts (SLSA) is a security framework +designed to enhance the integrity and security of software supply chains. +Developed by Google and maintained by the Open Source Security Foundation +(OpenSSF), SLSA provides a set of guidelines and best practices to prevent +tampering, improve integrity, and secure packages and infrastructure in software +projects. + +SLSA defines [four build levels (0–3)](https://slsa.dev/spec/latest/levels) of +increasing security rigor, focusing on areas such as build provenance, source +integrity, and build environment security. Each level builds upon the previous +one, offering a structured approach to achieving higher levels of software +supply chain security. + +## Why is SLSA important? + +SLSA is crucial for modern software development due to the increasing complexity +and interconnectedness of software supply chains. Supply chain attacks, such as +the SolarWinds breach, have highlighted the vulnerabilities in software +development processes. By implementing SLSA, organizations can: + +- Ensure artifact integrity: Verify that software artifacts have not been + tampered with during the build and deployment processes. + +- Enhance build provenance: Maintain verifiable records of how and when software + artifacts were produced, providing transparency and accountability. + +- Secure build environments: Implement controls to protect build systems from + unauthorized access and modifications. + +- Mitigate supply chain risks: Reduce the risk of introducing vulnerabilities or + malicious code into the software supply chain. + +## What is SLSA Build Level 3? + +SLSA Build Level 3, Hardened Builds, is the highest of four progressive levels in +the SLSA framework. It introduces strict requirements to ensure that software +artifacts are built securely and traceably. To meet Level 3, a build must: + +- Be fully automated and scripted to prevent manual tampering +- Use a trusted build service that enforces source and builder authentication +- Generate a signed, tamper-resistant provenance record describing how the artifact was built +- Capture metadata about the build environment, source repository, and build steps + +This level provides strong guarantees that the software was built from the +expected source in a controlled, auditable environment, which significantly +reduces the risk of supply chain attacks. + +## Docker Hardened Images and SLSA + +Docker Hardened Images (DHIs) are secure-by-default container images +purpose-built for modern production environments. Each DHI is cryptographically +signed and complies with the [SLSA Build Level 3 +standard](https://slsa.dev/spec/latest/levels#build-l3-hardened-builds), ensuring +verifiable build provenance and integrity. 
+
By integrating SLSA-compliant DHIs into your development and deployment processes, you can:

- Achieve higher security levels: Utilize images that meet stringent security
  standards, reducing the risk of vulnerabilities and attacks.

- Simplify compliance: Leverage built-in features like signed Software Bills of
  Materials (SBOMs) and vulnerability exception (VEX) statements to facilitate
  compliance with regulations such as FedRAMP.

- Enhance transparency: Access detailed information about the components and
  build process of each image, promoting transparency and trust.

- Streamline audits: Utilize verifiable build records and signatures to simplify
  security audits and assessments.

## Get and verify SLSA provenance for Docker Hardened Images

Each Docker Hardened Image (DHI) is cryptographically signed and includes
attestations. These attestations provide verifiable build provenance and
demonstrate adherence to SLSA Build Level 3 standards.

To get and verify SLSA provenance for a DHI, you can use Docker Scout:

```console
$ docker scout attest get <your-namespace>/dhi-<repository>:<tag> \
  --predicate-type https://slsa.dev/provenance/v0.2 \
  --verify
```

For example:

```console
$ docker scout attest get docs/dhi-node:20.19-debian12-fips-20250701182639 \
  --predicate-type https://slsa.dev/provenance/v0.2 \
  --verify
```

## Resources

For more details about SLSA definitions and Docker Build, see [SLSA definitions](/build/metadata/attestations/slsa-definitions/). \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/sscs.md b/content/manuals/dhi/core-concepts/sscs.md new file mode 100644 index 000000000000..bd6a58b1d677 --- /dev/null +++ b/content/manuals/dhi/core-concepts/sscs.md @@ -0,0 +1,52 @@ +--- +title: Software Supply Chain Security +linktitle: Software Supply Chain Security +description: Learn how Docker Hardened Images help secure every stage of your software supply chain with signed metadata, provenance, and minimal attack surface. +keywords: software supply chain security, secure container images, signed image provenance, docker sbom, distroless security +---

## What is Software Supply Chain Security (SSCS)?

SSCS encompasses practices and strategies designed to safeguard the entire
lifecycle of software development from initial code creation to deployment and
maintenance. It focuses on securing all components, including code,
dependencies, build processes, and distribution channels, to prevent
malicious actors from compromising the software supply chain. Given the
increasing reliance on open-source libraries and third-party components,
ensuring the integrity and security of these elements is paramount.

## Why is SSCS important?

The significance of SSCS has escalated due to the rise in sophisticated
cyberattacks targeting software supply chains. Recent incidents and the
exploitation of vulnerabilities in open-source components have underscored the
critical need for robust supply chain security measures. Compromises at any
stage of the software lifecycle can lead to widespread vulnerabilities, data
breaches, and significant financial losses.

## How Docker Hardened Images contribute to SSCS

Docker Hardened Images (DHI) are purpose-built container images designed with
security at their core, addressing the challenges of modern software supply
chain security.
By integrating DHI into your development and deployment +pipelines, you can enhance your organization's SSCS posture through the +following features: + +- Minimal attack surface: DHIs are engineered to be ultra-minimal, stripping + away unnecessary components and reducing the attack surface by up to 95%. This + distroless approach minimizes potential entry points for malicious actors. + +- Cryptographic signing and provenance: Each DHI is cryptographically signed, + ensuring authenticity and integrity. Build provenance is maintained, providing + verifiable evidence of the image's origin and build process, aligning with + standards like SLSA (Supply-chain Levels for Software Artifacts). + +- Software Bill of Materials (SBOM): DHIs include a comprehensive SBOM, + detailing all components and dependencies within the image. This transparency + aids in vulnerability management and compliance tracking, enabling teams to + assess and mitigate risks effectively. + +- Continuous maintenance and rapid CVE remediation: Docker maintains DHIs with + regular updates and security patches, backed by an SLA for addressing critical + and high-severity vulnerabilities. This proactive approach helps ensure that + images remain secure and compliant with enterprise standards. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/ssdlc.md b/content/manuals/dhi/core-concepts/ssdlc.md new file mode 100644 index 000000000000..eb90c6d3571c --- /dev/null +++ b/content/manuals/dhi/core-concepts/ssdlc.md @@ -0,0 +1,113 @@ +--- +title: Secure Software Development Lifecycle +linktitle: SSDLC +description: See how Docker Hardened Images support a secure SDLC by integrating with scanning, signing, and debugging tools. +keywords: secure software development, ssdlc containers, slsa compliance, docker scout integration, secure container debugging +--- + +## What is a Secure Software Development Lifecycle? + +A Secure Software Development Lifecycle (SSDLC) integrates security practices +into every phase of software delivery, from design and development to deployment +and monitoring. It’s not just about writing secure code, but about embedding +security throughout the tools, environments, and workflows used to build and +ship software. + +SSDLC practices are often guided by compliance frameworks, organizational +policies, and supply chain security standards such as SLSA (Supply-chain Levels +for Software Artifacts) or NIST SSDF. + +## Why SSDLC matters + +Modern applications depend on fast, iterative development, but rapid delivery +often introduces security risks if protections aren’t built in early. An SSDLC +helps: + +- Prevent vulnerabilities before they reach production +- Ensure compliance through traceable and auditable workflows +- Reduce operational risk by maintaining consistent security standards +- Enable secure automation in CI/CD pipelines and cloud-native environments + +By making security a first-class citizen in each stage of software delivery, +organizations can shift left and reduce both cost and complexity. + +## How Docker supports a secure SDLC + +Docker provides tools and secure content that make SSDLC practices easier to +adopt across the container lifecycle. With [Docker Hardened +Images](../_index.md) (DHIs), [Docker +Debug](../../../reference/cli/docker/debug.md), and [Docker +Scout](../../../manuals/scout/_index.md), teams can add security without losing +velocity. 
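The following phases describe where each tool fits. As a rough end-to-end sketch of the loop, assuming an image tagged `myapp` built from a DHI base (all names are placeholders):

```console
$ docker build -t myapp .        # build on a hardened base image
$ docker scout quickview myapp   # scan the result before shipping
$ docker debug myapp             # attach temporary debug tooling when needed
```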
+ +### Plan and design + +During planning, teams define architectural constraints, compliance goals, and +threat models. Docker Hardened Images help at this stage by providing: + +- Secure-by-default base images for common languages and runtimes +- Verified metadata including SBOMs, provenance, and VEX documents +- Support for both glibc and musl across multiple Linux distributions + +You can use DHI metadata and attestations to support design reviews, threat +modeling, or architecture sign-offs. + +### Develop + +In development, security should be transparent and easy to apply. Docker +Hardened Images support secure-by-default development: + +- Dev variants include shells, package managers, and compilers for convenience +- Minimal runtime variants reduce attack surface in final images +- Multi-stage builds let you separate build-time tools from runtime environments + +[Docker Debug](../../../reference/cli/docker/debug.md) helps developers: + +- Temporarily inject debugging tools into minimal containers +- Avoid modifying base images during troubleshooting +- Investigate issues securely, even in production-like environments + +### Build and test + +Build pipelines are an ideal place to catch issues early. Docker Scout +integrates with Docker Hub and the CLI to: + +- Scan for known CVEs using multiple vulnerability databases +- Trace vulnerabilities to specific layers and dependencies +- Interpret signed VEX data to suppress known-irrelevant issues +- Export JSON scan reports for CI/CD workflows + +Build pipelines that use Docker Hardened Images benefit from: + +- Reproducible, signed images +- Minimal build surfaces to reduce exposure +- Built-in compliance with SLSA Build Level 3 standards + +### Release and deploy + +Security automation is critical as you release software at scale. Docker +supports this phase by enabling: + +- Signature verification and provenance validation before deployment +- Policy enforcement gates using Docker Scout +- Safe, non-invasive container inspection using Docker Debug + +DHIs ship with the metadata and signatures required to automate image +verification during deployment. + +### Monitor and improve + +Security continues after release. With Docker tools, you can: + +- Continuously monitor image vulnerabilities through Docker Hub +- Get CVE remediation guidance and patch visibility using Docker Scout +- Receive updated DHI images with rebuilt and re-signed secure layers +- Debug running workloads with Docker Debug without modifying the image + +## Summary + +Docker helps teams embed security throughout the SSDLC by combining secure +content (DHIs) with developer-friendly tooling (Docker Scout and Docker Debug). +These integrations promote secure practices without introducing friction, making +it easier to adopt compliance and supply chain security across your software +delivery lifecycle. \ No newline at end of file diff --git a/content/manuals/dhi/core-concepts/vex.md b/content/manuals/dhi/core-concepts/vex.md new file mode 100644 index 000000000000..0c334dfc9cb4 --- /dev/null +++ b/content/manuals/dhi/core-concepts/vex.md @@ -0,0 +1,90 @@ +--- +title: Vulnerability Exploitability eXchange (VEX) +linktitle: VEX +description: Learn how VEX helps you prioritize real risks by identifying which vulnerabilities in Docker Hardened Images are actually exploitable. +keywords: vex container security, vulnerability exploitability, filter false positives, docker scout vex, cve prioritization +--- + +## What is VEX? 
+
Vulnerability Exploitability eXchange (VEX) is a standardized framework
developed by the U.S. Cybersecurity and Infrastructure Security Agency (CISA) to
document the exploitability of vulnerabilities within software components.
Unlike traditional CVE (Common Vulnerabilities and Exposures) databases, VEX
provides contextual assessments, indicating whether a vulnerability is
exploitable in a given environment. This approach helps organizations prioritize
remediation efforts by distinguishing between vulnerabilities that are
exploitable and those that are not relevant to their specific use cases.

## Why is VEX important?

VEX enhances traditional vulnerability management by:

- Reducing false positives: By providing context-specific assessments, VEX helps
  in filtering out vulnerabilities that do not pose a threat in a particular
  environment.

- Prioritizing remediation: Organizations can focus resources on addressing
  vulnerabilities that are exploitable in their specific context, improving
  efficiency in vulnerability management.

- Enhancing compliance: VEX reports provide detailed information that can assist
  in meeting regulatory requirements and internal security standards.

This approach is particularly beneficial in complex environments where numerous
components and configurations exist, and traditional CVE-based assessments might
lead to unnecessary remediation efforts.

## How Docker Hardened Images integrate VEX

To enhance vulnerability management, Docker Hardened Images (DHI) incorporate
VEX reports, providing context-specific assessments of known vulnerabilities.

This integration enables you to:

- Assess exploitability: Determine whether known vulnerabilities in the image's
  components are exploitable in your specific environment.

- Prioritize actions: Focus remediation efforts on vulnerabilities that pose
  actual risks, optimizing resource allocation.

- Streamline audits: Utilize the detailed information provided by VEX reports to
  simplify compliance audits and reporting.

By combining the security features of DHI with the contextual insights of VEX,
organizations can achieve a more effective and efficient approach to
vulnerability management.

## Use VEX to filter known non-exploitable CVEs

When using Docker Scout, VEX statements are automatically applied and no
manual configuration is needed.

To manually retrieve the VEX attestation for tools that support it:

```console
$ docker scout attest get \
  --predicate-type https://openvex.dev/ns/v0.2.0 \
  --predicate \
  <your-namespace>/dhi-<repository>:<tag> --platform <platform> > vex.json
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://openvex.dev/ns/v0.2.0 \
  --predicate \
  docs/dhi-python:3.13 --platform linux/amd64 > vex.json
```

This creates a `vex.json` file containing the VEX statements for the specified
image. You can then use this file with tools that support VEX to filter out
known non-exploitable CVEs.
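To inspect what was exported before handing the file to a scanner, you can list each statement's CVE and status. A sketch, assuming the OpenVEX v0.2.0 document layout and that `jq` is installed:

```console
$ jq '.statements[] | {cve: .vulnerability.name, status: .status}' vex.json
```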
+
For example, with Grype and Trivy, you can use the `--vex` flag to apply the VEX
statements during the scan:

```console
$ grype <your-namespace>/dhi-<repository>:<tag> --vex vex.json
```
\ No newline at end of file diff --git a/content/manuals/dhi/features/_index.md b/content/manuals/dhi/features/_index.md new file mode 100644 index 000000000000..ecb99bf1d15e --- /dev/null +++ b/content/manuals/dhi/features/_index.md @@ -0,0 +1,39 @@ +--- +title: Features +description: Explore the core features of Docker Hardened Images, including hardened defaults, secure metadata, and ecosystem compatibility. +weight: 10 +params: + grid_features: + - title: Hardened, secure images + description: Learn how Docker Hardened Images reduce vulnerabilities, enforce non-root execution, and include SLSA-compliant metadata for supply chain security. + icon: lock + link: /dhi/features/secure/ + - title: Seamless integration + description: See how Docker Hardened Images integrate with CI/CD pipelines, vulnerability scanners, and container registries across your toolchain. + icon: hub + link: /dhi/features/integration/ + - title: Enterprise support + description: Learn about enterprise support and SLA-driven updates. + icon: settings + link: /dhi/features/support/ + - title: Continuous patching and secure maintenance + description: Learn how Docker Hardened Images are continuously updated with security patches, ensuring your images remain secure over time. + icon: dashboard + link: /dhi/features/patching/ + - title: Flexible, repository-based pricing + description: Learn how Docker Hardened Images offer repository-based flexibility with no per-image or per-pull limitations. + icon: wallet + link: /dhi/features/flexible/ +---

Docker Hardened Images (DHIs) go beyond minimal base and application images by
incorporating hardened defaults, signed metadata, and broad ecosystem
compatibility. Whether you're securing a single service or rolling out
compliance controls at scale, this section covers the key features that make
DHIs production-ready.

## Explore core features

{{< grid
  items="grid_features"
>}} \ No newline at end of file diff --git a/content/manuals/dhi/features/flexible.md b/content/manuals/dhi/features/flexible.md new file mode 100644 index 000000000000..a5dcdcbed532 --- /dev/null +++ b/content/manuals/dhi/features/flexible.md @@ -0,0 +1,44 @@ +--- +title: Flexible, repository-based pricing +linktitle: Flexibility +description: Understand how Docker Hardened Images give you cost control by charging only for what you mirror and use. +keywords: docker hardened images pricing, per repo billing, flexible pricing model, mirror image pricing, container pricing model +weight: 30 +---

Docker Hardened Images are designed not only for security and compliance, but
also for operational and financial efficiency. With a model that charges per
repository, you get precise control over what you use and what you pay for.

## Repository mirroring on your terms

With Docker Hardened Images, you mirror entire repositories, each giving you
access to all supported tags, variants, and versions. You can choose which
repositories to mirror based on your needs.

This flexibility enables your organization to adapt as projects evolve, whether
you're spinning up new environments, consolidating runtimes, or managing costs
over time, without worrying about per-image or per-pull fees.
+
## Access all variants and versions

When you mirror a Docker Hardened Image repository, you gain access to all
supported tags in that repository, including multiple versions, base
distributions (such as Alpine and Debian), and development and runtime variants.
You can freely choose the best tag for each use case without incurring additional cost.

This flexibility enables teams to adopt secure images without being limited by
billing complexity or image count.

## Share access across your team

Once a repository is mirrored, anyone in your organization can pull, verify,
scan, and run images from it. There are no extra charges based on usage volume.
You mirror what you need, and your teams use it freely.

## Cost efficiency for platform teams

This model simplifies budgeting for platform and security teams. Rather than
tracking usage at the individual image or tag level, you manage your spend
through the repositories you control, aligning security enforcement, team access,
and cost in one place.

diff --git a/content/manuals/dhi/features/integration.md b/content/manuals/dhi/features/integration.md new file mode 100644 index 000000000000..b33a20ad8166 --- /dev/null +++ b/content/manuals/dhi/features/integration.md @@ -0,0 +1,81 @@ +--- +title: Seamless integration +description: Learn how Docker Hardened Images integrate into your existing development and deployment workflows for enhanced security without compromising usability. +description_short: See how Docker Hardened Images integrate with CI/CD pipelines, vulnerability scanners, and container registries across your toolchain +keywords: ci cd containers, vulnerability scanning, slsa build level 3, signed sbom, oci compliant registry +---

Docker Hardened Images (DHI) are designed to integrate effortlessly into your
existing development and deployment workflows, ensuring that enhanced security
does not come at the cost of usability.

## Explore images in Docker Hub

After your organization [signs
up](https://www.docker.com/products/hardened-images/#getstarted), teams can
explore the full DHI catalog directly on Docker Hub. There, developers and
security teams can:

- Review available images and language and framework variants
- Understand supported distributions
- Compare development and runtime variants

Each repository includes metadata like supported tags, base image
configurations, and image-specific documentation, helping you choose the right variant
for your project.

## Use DHIs in CI/CD workflows

You can use DHIs as base images in any CI/CD pipeline that builds with a
Dockerfile. They integrate easily into platforms like GitHub Actions,
GitLab CI/CD, Jenkins, CircleCI, and other automation systems your team already
uses.

## Built to fit your DevSecOps stack

Docker Hardened Images are designed to work seamlessly with your existing
DevSecOps toolchain. They integrate with scanning tools, registries, CI/CD
systems, and policy engines that teams already use.

Docker has partnered with a broad range of ecosystem providers to
ensure that DHIs work out of the box with your existing workflows and tools.
These partners help deliver enhanced scanning, metadata validation, and
compliance insights directly into your pipelines.
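For instance, a CI gate might verify an image's signed SBOM attestation before promoting it to a release registry. A minimal sketch using the Docker Scout CLI; the namespace, repository, and platform are placeholders:

```console
$ docker scout attest get <your-namespace>/dhi-python:3.13 \
  --predicate-type https://scout.docker.com/sbom/v0.1 \
  --verify --platform linux/amd64
```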
+ +All DHIs include: + +- Signed Software Bill of Materials (SBOMs) +- CVE data +- Vulnerability Exploitability eXchange (VEX) documents +- SLSA Build Level 3 provenance + +Because the metadata is signed and structured, you can feed it into policy +engines and dashboards for auditing or compliance workflows. + +## Distribute through your preferred registry + +DHIs are mirrored to your organization's namespace on Docker Hub. From there, +you can optionally push them to any OCI-compliant registry, such as: + +- Amazon ECR +- Google Artifact Registry +- GitHub Container Registry +- Azure Container Registry +- Harbor +- JFrog Artifactory +- Other OCI-compliant on-premises or cloud registries + +Mirroring ensures teams can pull images from their preferred location without +breaking policies or build systems. + +## Summary + +Docker Hardened Images integrate with the tools you already use, from development +and CI to scanning and deployment. They: + +- Work with standard Docker tooling and pipelines +- Support popular scanners and registries +- Include security metadata that plugs into your existing compliance systems + +This means you can adopt stronger security controls without disrupting your +engineering workflows. diff --git a/content/manuals/dhi/features/patching.md b/content/manuals/dhi/features/patching.md new file mode 100644 index 000000000000..5c49fe74ce02 --- /dev/null +++ b/content/manuals/dhi/features/patching.md @@ -0,0 +1,42 @@ +--- +title: Continuous patching and secure maintenance +linktitle: Continuous patching +description: Learn how Docker Hardened Images are automatically rebuilt, tested, and updated to stay in sync with upstream security patches. +keywords: docker hardened images, secure base image, automatic patching, CVE updates, compatibility, dev containers, runtime containers, image maintenance +--- + +Docker Hardened Images (DHI) offer a secure and enterprise-ready foundation for +containerized applications, backed by a robust, automated patching process that +helps maintain compliance and reduce vulnerability exposure. + +## Secure base images with strong compatibility + +DHI includes a curated set of minimal base images designed to work across a +broad range of environments and language ecosystems. These images provide secure +building blocks with high compatibility, making it easier to integrate into your +existing infrastructure and development workflows without sacrificing security. + +## Development and runtime variants + +To support different stages of the software lifecycle, DHI provides two key +variants: + +- Development images: Include essential tools and libraries required to build + and test applications securely. +- Runtime images: Contain only the core components needed to run applications, + offering a smaller attack surface and improved runtime efficiency. + +This variant structure supports multi-stage builds, enabling developers to +compile code in secure development containers and deploy with lean runtime +images in production. + +## Automated patching and secure updates + +Docker monitors upstream open-source packages and security advisories for +vulnerabilities (CVEs) and other updates. When changes are detected, affected +Docker Hardened Images are automatically rebuilt and tested. + +Updated images are published with cryptographic provenance attestations to +support verification and compliance workflows. 
This automated process reduces +the operational burden of manual patching and helps teams stay aligned with +secure software development practices. \ No newline at end of file diff --git a/content/manuals/dhi/features/secure.md b/content/manuals/dhi/features/secure.md new file mode 100644 index 000000000000..d148a1ff4cf6 --- /dev/null +++ b/content/manuals/dhi/features/secure.md @@ -0,0 +1,48 @@ +--- +title: Hardened, secure images +description: Learn how Docker Hardened Images reduce vulnerabilities, enforce non-root execution, and include SLSA-compliant metadata for supply chain security. +keywords: non-root containers, slsa build level 3, signed sbom, vex document, hardened container image +--- + +Docker Hardened Images (DHI) are engineered to provide a robust security +foundation for containerized applications, addressing the evolving challenges of +software supply chain security. + +## Near-zero vulnerabilities and non-root execution + +Each DHI is meticulously built to eliminate known vulnerabilities, achieving +near-zero Common Vulnerabilities and Exposures (CVEs) through continuous +scanning and updates. By adhering to the principle of least privilege, DHI +images run as non-root by default, reducing the risk of privilege escalation +attacks in production environments. + +## Comprehensive supply chain security + +DHI incorporates multiple layers of security metadata to ensure transparency and +trust: + +- SLSA Level 3 compliance: Each image includes detailed build provenance, + meeting the standards set by the Supply-chain Levels for Software Artifacts + (SLSA) framework. + +- Software Bill of Materials (SBOMs): Comprehensive SBOMs are provided, + detailing all components within the image to facilitate vulnerability + management and compliance audits. + +- Vulnerability Exploitability eXchange (VEX) statements: VEX documents + accompany each image, providing context about known vulnerabilities and their + exploitability status. + +- Cryptographic signing and attestations: All images and associated metadata are + cryptographically signed, ensuring integrity and authenticity. + +## Minimal and developer-friendly options + +DHI provides both minimal and development-friendly image variants: + +- Minimal images: Built using a distroless approach, these images remove + unnecessary components, reducing the attack surface by up to 95% and improving + startup times. + +- Development images: Equipped with essential development tools and libraries, + these images facilitate secure application building and testing. \ No newline at end of file diff --git a/content/manuals/dhi/features/support.md b/content/manuals/dhi/features/support.md new file mode 100644 index 000000000000..2da74c0e7fa2 --- /dev/null +++ b/content/manuals/dhi/features/support.md @@ -0,0 +1,28 @@ +--- +title: Enterprise support +description: Get enterprise-grade support and SLA-backed security updates for Docker Hardened Images (DHI), including 24x7x365 access to Docker’s support team and guaranteed CVE patching for critical and high vulnerabilities. +keywords: enterprise container support, sla-backed security, cve patching, secure container image, docker enterprise support +--- + +Docker Hardened Images (DHI) are designed to provide flexibility and robust +support for enterprise environments, allowing teams to tailor images to their +specific needs while ensuring security and compliance. 
+
## Enterprise-grade support and SLA-backed security updates

Docker provides comprehensive enterprise support for DHI users, ensuring rapid
response to security threats and operational issues:

- Enterprise support: Access to Docker's support team, with
  response times designed to safeguard mission-critical applications and
  maintain operational continuity.

- SLA-backed CVE mitigation: Docker aims to address Critical and High severity
  Common Vulnerabilities and Exposures (CVEs) within 7 working days of an
  upstream fix becoming available, with some exceptions. This is faster than
  typical industry response times and is backed by an enterprise-grade SLA, so
  your teams can rely on timely fixes to keep workloads secure.

This level of support ensures that organizations can rely on DHI for their
mission-critical applications, with the assurance that security and stability
are maintained proactively. \ No newline at end of file diff --git a/content/manuals/dhi/get-started.md b/content/manuals/dhi/get-started.md new file mode 100644 index 000000000000..44922acb1ce6 --- /dev/null +++ b/content/manuals/dhi/get-started.md @@ -0,0 +1,121 @@ +--- +linktitle: Quickstart +title: Docker Hardened Images quickstart +description: Follow a quickstart guide to explore, mirror, and run a Docker Hardened Image. +weight: 2 +keywords: docker hardened images quickstart, mirror container image, run secure image +---

{{< summary-bar feature_name="Docker Hardened Images" >}}

This guide shows you how to go from zero to running a Docker Hardened Image
(DHI) using a real example. While the steps use a specific image as an
example, they can be applied to any DHI.

## Step 1: Sign up and subscribe to DHI for access

To access Docker Hardened Images, your organization must [sign
up](https://www.docker.com/products/hardened-images/#getstarted) and subscribe.

## Step 2: Find an image to use

Once subscribed, Docker Hardened Images appear under your organization's
namespace on Docker Hub.

1. Go to [Docker Hub](https://hub.docker.com) and sign in.
2. Select **My Hub** in the top navigation.
3. In the left sidebar, choose your organization that has DHI access.
4. In the left sidebar, select **DHI catalog**.

   ![Docker Hub sidebar showing DHI catalog](./images/dhi-catalog.png)

5. Use the search bar or filters to find an image (for example, `python`, `node`,
   or `golang`). For this guide, use the Python image as an example.

   ![DHI catalog with Python repository shown](./images/dhi-python-search.png)

6. Select the Python repository to view its details.

Continue to the next step to mirror the image. To dive deeper into exploring
images, see [Explore Docker Hardened Images](./how-to/explore.md).

## Step 3: Mirror the image

To use a Docker Hardened Image, you must mirror it to your organization. Only
organization owners can perform this action. Mirroring creates a copy of the
image in your organization's namespace, allowing team members to pull and use
it.

1. On the image repository page, select **Mirror to repository**.

   ![An image of the Python page with the Mirror to repository button showing](./images/dhi-mirror-button.png)

   > [!NOTE]
   >
   > If you don't see the **Mirror to repository** button, the repository might
   > already be mirrored to your organization. In this case, you can select
   > **View in repository** to see the mirrored image's location or mirror it to
   > another repository.

2. Follow the on-screen instructions to choose a name.
For this guide, the
   example uses the name `dhi-python`. Note that the name must start with
   `dhi-`.

   ![Mirror a repository page](./images/dhi-mirror-screen.png)

3. Select **Create repository** to start the mirroring process.

It might take a few minutes for all the tags to finish mirroring. Once
mirrored, the image repository appears in your organization's namespace. For
example, in [Docker Hub](https://hub.docker.com), go to **My Hub** > ***YOUR_ORG*** > **Repositories**,
and you should see `dhi-python` listed. You can now pull it
like any other image.

![Repository list with mirrored repository showing](./images/dhi-python-mirror.png)

Continue to the next step to pull and run the image. To dive deeper into
mirroring images, see [Mirror a Docker Hardened Image
repository](./how-to/mirror.md).

## Step 4: Pull and run the image

Once you've mirrored the image to your organization, you can pull and run it
like any other Docker image. Note that Docker Hardened Images are designed to be
minimal and secure, so they might not include all the tools or libraries you
expect in a typical image. You can view the typical differences in
[Considerations when adopting
DHIs](./how-to/use.md#considerations-when-adopting-dhis).

The following example demonstrates that you can run the Python image and execute
a simple Python command just like you would with any other Docker image:

1. Pull the mirrored image. Open a terminal and run the following command,
   replacing `<your-namespace>` with your organization's namespace:

   ```console
   $ docker pull <your-namespace>/dhi-python:3.13
   ```

2. Run the image to confirm everything works:

   ```console
   $ docker run --rm <your-namespace>/dhi-python:3.13 python -c "print('Hello from DHI')"
   ```

   This starts a container from the `dhi-python:3.13` image and runs a simple
   Python script that prints `Hello from DHI`.

To dive deeper into using images, see [Use a Docker Hardened Image](./how-to/use.md).

## What's next

You've pulled and run your first Docker Hardened Image. Here are a few ways to keep going:

- [Migrate existing applications to DHIs](./how-to/migrate.md): Learn how to
  update your Dockerfiles to use Docker Hardened Images as the base.

- [Verify DHIs](./how-to/verify.md): Use tools like [Docker Scout](/scout/) or
  Cosign to inspect and verify signed attestations, like SBOMs and provenance.

- [Scan DHIs](./how-to/scan.md): Analyze the image with Docker
  Scout or other scanners to identify known CVEs. \ No newline at end of file diff --git a/content/manuals/dhi/how-to/_index.md b/content/manuals/dhi/how-to/_index.md new file mode 100644 index 000000000000..0492b228b209 --- /dev/null +++ b/content/manuals/dhi/how-to/_index.md @@ -0,0 +1,67 @@ +--- +title: How-tos +description: Step-by-step guidance for working with Docker Hardened Images, from discovery to debugging. +weight: 20 +params: + grid_howto: + - title: Explore Docker Hardened Images + description: Learn how to find and evaluate image repositories, variants, metadata, and attestations in the DHI catalog on Docker Hub. + icon: travel_explore + link: /dhi/how-to/explore/ + - title: Mirror a Docker Hardened Image repository + description: Learn how to mirror an image into your organization's namespace and optionally push it to another private registry. + icon: compare_arrows + link: /dhi/how-to/mirror/ + - title: Use a Docker Hardened Image + description: Learn how to pull, run, and reference Docker Hardened Images in Dockerfiles, CI pipelines, and standard development workflows.
      icon: play_arrow
      link: /dhi/how-to/use/
    - title: Migrate an existing application to use Docker Hardened Images
      description: Follow a step-by-step guide to update your Dockerfiles and adopt Docker Hardened Images for secure, minimal, and production-ready builds.
      icon: directions_run
      link: /dhi/how-to/migrate/
    - title: Verify a Docker Hardened Image
      description: Use Docker Scout or cosign to verify signed attestations like SBOMs, provenance, and vulnerability data for Docker Hardened Images.
      icon: check_circle
      link: /dhi/how-to/verify/
    - title: Scan a Docker Hardened Image
      description: Learn how to scan Docker Hardened Images for known vulnerabilities using Docker Scout, Grype, or Trivy.
      icon: bug_report
      link: /dhi/how-to/scan/
    - title: Enforce Docker Hardened Image usage with policies
      description: Learn how to use image policies with Docker Scout for Docker Hardened Images.
      icon: policy
      link: /dhi/how-to/policies/
    - title: Debug a Docker Hardened Image
      description: Use Docker Debug to inspect a running container based on a hardened image without modifying it.
      icon: terminal
      link: /dhi/how-to/debug/
---

This section provides practical, step-by-step guidance for working with Docker
Hardened Images (DHIs). Whether you're evaluating DHIs for the first time or
integrating them into a production CI/CD pipeline, these topics walk you
through each phase of the adoption journey, from discovery to debugging.

To help you get started and stay secure, the topics are organized around the
typical lifecycle of working with DHIs.

## Lifecycle flow

1. Explore available images and metadata in the DHI catalog.
2. Mirror trusted images into your namespace or registry.
3. Adopt DHIs in your workflows by pulling, using in development and CI, and
   migrating existing applications to use secure, minimal base images.
4. Analyze images by verifying signatures, SBOMs, and provenance, and scanning
   for vulnerabilities.
5. Enforce policies to maintain security and compliance.
6. Debug containers based on DHIs without modifying the image.

Each of the following topics aligns with a step in this lifecycle, so you can progress
confidently through exploration, implementation, and ongoing maintenance.

## Step-by-step topics

{{< grid
  items="grid_howto"
>}}
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/debug.md b/content/manuals/dhi/how-to/debug.md
new file mode 100644
index 000000000000..122a25aa3e7b
--- /dev/null
+++ b/content/manuals/dhi/how-to/debug.md
@@ -0,0 +1,77 @@
---
title: Debug a Docker Hardened Image container
linkTitle: Debug a container
weight: 60
description: Learn how to use Docker Debug to troubleshoot Docker Hardened Images (DHI) locally or in production.
keywords: docker debug, debug, hardened images, DHI, troubleshooting, ephemeral container, non-root containers, hardened container image, debug secure container
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Docker Hardened Images (DHI) prioritize minimalism and security, which means
they intentionally leave out many common debugging tools (like shells or package
managers). This makes direct troubleshooting difficult without introducing risk.
To address this, you can use [Docker
Debug](../../../reference/cli/docker/debug.md), a secure workflow that
temporarily attaches an ephemeral debug container to a running service or image
without modifying the original image.
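
For example, attaching a debug session to a local container, or to one running
on a remote Docker engine over SSH, looks like the following minimal sketch
(the container name and the remote address are hypothetical):

```console
# Attach an ephemeral, tool-rich debug container to a local container
$ docker debug myapp

# Attach to a container running on a remote engine, reached over SSH
$ docker debug --host ssh://user@remote-host myapp
```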

This guide shows how to debug Docker Hardened Images locally during
development. You can also debug containers remotely using the `--host` option,
as shown in the previous example.

The following example uses a mirrored `dhi-python:3.13` image, but the same steps apply to any image.

## Step 1: Run a container from a Hardened Image

Start with a DHI-based container that simulates an issue:

```console
$ docker run -d --name myapp <your-namespace>/dhi-python:3.13 python -c "import time; time.sleep(300)"
```

This container doesn't include a shell or tools like `ps`, `top`, or `cat`.

If you try:

```console
$ docker exec -it myapp sh
```

You'll see:

```console
exec: "sh": executable file not found in $PATH
```

## Step 2: Use Docker Debug to inspect the container

Use the `docker debug` command to attach a temporary, tool-rich debug container to the running instance.

```console
$ docker debug myapp
```

From here, you can inspect running processes, network status, or mounted files.

For example, to check running processes:

```console
$ ps aux
```

Exit the debug session with:

```console
$ exit
```

## What's next

Docker Debug helps you troubleshoot hardened containers without compromising the
integrity of the original image. Because the debug container is ephemeral and
separate, it avoids introducing security risks into production environments.

If you encounter issues related to permissions, ports, missing shells, or
package managers, see [Troubleshoot Docker Hardened Images](../troubleshoot.md)
for recommended solutions and workarounds.
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/explore.md b/content/manuals/dhi/how-to/explore.md
new file mode 100644
index 000000000000..a023ff99abf8
--- /dev/null
+++ b/content/manuals/dhi/how-to/explore.md
@@ -0,0 +1,141 @@
---
title: Explore Docker Hardened Images
linktitle: Explore images
description: Learn how to find and evaluate image repositories, variants, metadata, and attestations in the DHI catalog on Docker Hub.
keywords: explore docker images, image variants, docker hub catalog, container image metadata, signed attestations
weight: 10
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Docker Hardened Images (DHI) are a curated set of secure, production-ready
container images designed for enterprise use. This page explains how to explore
available DHI repositories, review image metadata, examine variant details, and
understand the security attestations provided. Use this information to evaluate
and select the right image variants for your applications before mirroring them
to your organization.

## Access Docker Hardened Images

Docker Hardened Images require a subscription. [Sign
up](https://www.docker.com/products/hardened-images/#getstarted) to access
Docker Hardened Images.

## Explore Docker Hardened Images

To explore Docker Hardened Images (DHI):

1. Go to [Docker Hub](https://hub.docker.com) and sign in.
2. Select **My Hub**.
3. In the namespace drop-down, select your organization that has access to DHI.
4. Select **DHI catalog**.

On the DHI page, you can browse images, search images, or filter images by
category.

## View repository details

To view repository details:

1. Go to [Docker Hub](https://hub.docker.com) and sign in.
2. Select **My Hub**.
3. In the namespace drop-down, select your organization that has access to DHI.
4. Select **DHI catalog**.
5. Select a repository in the DHI catalog list.

The repository details page provides the following:

- Overview: A brief explanation of the image.
- Guides: Several guides on how to use the image and migrate your existing application.
- Tags: Select this option to [view image variants](#view-image-variants).
- Security summary: Select a tag name to view a quick security summary,
  including package count, total known vulnerabilities, and Scout health score.
- Recently pushed tags: A list of recently updated image variants and when they
  were last updated.
- Mirror to repository: Select this option to mirror the image to your
  organization's repository in order to use it. Only organization owners can mirror a repository.
- View in repository: After a repository has been mirrored, you can select this
  option to view where the repository has been mirrored, or mirror it to another repository.

## View image variants

Tags are used to identify image variants. Image variants are different builds of
the same application or framework tailored for different use cases.

To explore image variants:

1. Go to [Docker Hub](https://hub.docker.com) and sign in.
2. Select **My Hub**.
3. In the namespace drop-down, select your organization that has access to DHI.
4. Select **DHI catalog**.
5. Select a repository in the DHI catalog list.
6. Select **Tags**.

The **Tags** page provides the following information:

- Tags: A list of all available tags, also known as image variants.
- Compliance: Lists relevant compliance designations. For example, `FIPS`.
- Distribution: The distribution that the variant is based on. For example, `debian 12` or `alpine 3.21`.
- Package manager: The package manager that is available in the variant. For example, `apt`, `apk`, or `-` (no package manager).
- Shell: The shell that is available in the variant. For example, `bash`, `busybox`, or `-` (no shell).
- User: The user that the container runs as. For example, `root`, `nonroot (65532)`, or `node (1000)`.
- Last pushed: The number of days since the image variant was last pushed.
- Vulnerabilities: The number of vulnerabilities in the variant, grouped by severity.
- Health: The Scout health score for the variant. Select the score icon to get more details.

> [!NOTE]
>
> Unlike most images on Docker Hub, Docker Hardened Images do not use the
> `latest` tag. Each image variant is published with a full semantic version tag
> (for example, `3.13`, `3.13-dev`) and is kept up to date. If you need to pin
> to a specific image release for reproducibility, you can reference the image
> by its [digest](../core-concepts/digests.md).

## View image variant details

To explore the details of an image variant:

1. Go to [Docker Hub](https://hub.docker.com) and sign in.
2. Select **My Hub**.
3. In the namespace drop-down, select your organization that has access to DHI.
4. Select **DHI catalog**.
5. Select a repository in the DHI catalog list.
6. Select **Tags**.
7. Select the image variant's tag in the table.

The image variant details page provides the following information:

- Packages: A list of all packages included in the image variant. This section
  includes details about each package, including its name, version,
  distribution, and licensing information.
- Specifications: The specifications for the image variant include the following
  key details:
  - Source & Build Information: The image is built from the Dockerfile found
    here and the Git commit.
  - Build parameters
  - Entrypoint
  - CMD
  - User
  - Working directory
  - Environment Variables
  - Labels
  - Platform
- Vulnerabilities: The vulnerabilities section provides a list of known CVEs for
  the image variant, including:
  - CVE
  - Severity
  - Package
  - Fix version
  - Last detected
  - Status
  - Suppressed CVEs
- Attestations: Variants include comprehensive security attestations to verify
  the image's build process, contents, and security posture. These attestations
  are signed and can be verified using cosign. For a list of available
  attestations, see [Attestations](../core-concepts/attestations.md).

## What's next

After finding an image you need, you can [mirror the image to your
organization](./mirror.md). If the image is already mirrored, then you can start
[using the image](./use.md).
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/migrate.md b/content/manuals/dhi/how-to/migrate.md
new file mode 100644
index 000000000000..22b1d825d35f
--- /dev/null
+++ b/content/manuals/dhi/how-to/migrate.md
@@ -0,0 +1,251 @@
---
title: Migrate an existing application to use Docker Hardened Images
linktitle: Migrate an app
description: Follow a step-by-step guide to update your Dockerfiles and adopt Docker Hardened Images for secure, minimal, and production-ready builds.
weight: 50
keywords: migrate dockerfile, hardened base image, multi-stage build, non-root containers, secure container build
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

This guide helps you migrate your existing Dockerfiles to use Docker Hardened
Images (DHIs) [manually](#step-1-update-the-base-image-in-your-dockerfile)
or with [Gordon](#use-gordon).
DHIs are minimal and security-focused, which might require
adjustments to your base images, build process, and runtime configuration.

This guide focuses on migrating framework images, such as images for building
applications from source using languages like Go, Python, or Node.js. If you're
migrating application images, such as databases, proxies, or other prebuilt
services, many of the same principles still apply.

## Migration considerations

DHIs omit common tools such as shells and package managers to
reduce the attack surface. They also default to running as a nonroot user. As a
result, migrating to DHI typically requires the following changes to your
Dockerfile:

| Item               | Migration note |
|:-------------------|:---------------|
| Base image         | Replace your base images in your Dockerfile with a Docker Hardened Image. |
| Package management | Images intended for runtime don't contain package managers. Use package managers only in images with a `dev` tag. Use multi-stage builds and copy necessary artifacts from the build stage to the runtime stage. |
| Non-root user      | By default, images intended for runtime run as the nonroot user. Ensure that necessary files and directories are accessible to the nonroot user. |
| Multi-stage build  | Use images with a `dev` or `sdk` tag for build stages and non-dev images for runtime. |
| TLS certificates   | DHIs contain standard TLS certificates by default. There is no need to install TLS certificates. |
| Ports              | DHIs intended for runtime run as a nonroot user by default. As a result, applications in these images can't bind to privileged ports (below 1024) when running in Kubernetes or in Docker Engine versions older than 20.10. To avoid issues, configure your application to listen on port 1025 or higher inside the container. |
| Entry point        | DHIs might have different entry points than images such as Docker Official Images. Inspect entry points for DHIs and update your Dockerfile if necessary. |
| No shell           | DHIs intended for runtime don't contain a shell. Use dev images in build stages to run shell commands and then copy artifacts to the runtime stage. |

For more details and troubleshooting tips, see [Troubleshoot](/manuals/dhi/troubleshoot.md).

## Migrate an existing application

The following steps outline the migration process.

### Step 1: Update the base image in your Dockerfile

Update the base image in your application's Dockerfile to a hardened image. This
is typically an image tagged as `dev` or `sdk` because it has the tools
needed to install packages and dependencies.

The following example diff snippet from a Dockerfile shows the old base image
replaced by the new hardened image.

```diff
- ## Original base image
- FROM golang:1.22

+ ## Updated to use hardened base image
+ FROM <your-namespace>/dhi-golang:1.22-dev
```

### Step 2: Update the runtime image in your Dockerfile

To ensure that your final image is as minimal as possible, you should use a
[multi-stage build](/manuals/build/building/multi-stage.md). All stages in your
Dockerfile should use a hardened image. While intermediary stages will typically
use images tagged as `dev` or `sdk`, your final runtime stage should use a runtime image.

Use the build stage to compile your application and copy the resulting
artifacts to the final runtime stage. This ensures that your final image is
minimal and secure.

See the [Example Dockerfile migrations](#example-dockerfile-migrations) section for
examples of how to update your Dockerfile.

## Example Dockerfile migrations

The following migration examples show a Dockerfile before the migration and
after the migration.

### Go example

{{< tabs >}}
{{< tab name="Before" >}}

```dockerfile
#syntax=docker/dockerfile:1

FROM golang:latest

WORKDIR /app
ADD . ./
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main .

ENTRYPOINT ["/app/main"]
```

{{< /tab >}}
{{< tab name="After" >}}

```dockerfile
#syntax=docker/dockerfile:1

# === Build stage: Compile Go application ===
FROM <your-namespace>/dhi-golang:1-alpine3.21-dev AS builder

WORKDIR /app
ADD . ./
RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags="-s -w" --installsuffix cgo -o main .

# === Final stage: Create minimal runtime image ===
FROM <your-namespace>/dhi-golang:1-alpine3.21

WORKDIR /app
COPY --from=builder /app/main /app/main

ENTRYPOINT ["/app/main"]
```
{{< /tab >}}
{{< /tabs >}}

### Node.js example

{{< tabs >}}
{{< tab name="Before" >}}

```dockerfile
#syntax=docker/dockerfile:1

FROM node:latest
WORKDIR /usr/src/app

COPY package*.json ./
RUN npm install

COPY image.jpg ./image.jpg
COPY . .

CMD ["node", "index.js"]
```

{{< /tab >}}
{{< tab name="After" >}}

```dockerfile
#syntax=docker/dockerfile:1

#=== Build stage: Install dependencies and build application ===#
FROM <your-namespace>/dhi-node:23-alpine3.21-dev AS builder
WORKDIR /usr/src/app

COPY package*.json ./
RUN npm install

COPY image.jpg ./image.jpg
COPY . .

#=== Final stage: Create minimal runtime image ===#
FROM <your-namespace>/dhi-node:23-alpine3.21
ENV PATH=/app/node_modules/.bin:$PATH

COPY --from=builder --chown=node:node /usr/src/app /app

WORKDIR /app

CMD ["index.js"]
```
{{< /tab >}}
{{< /tabs >}}

### Python example

{{< tabs >}}
{{< tab name="Before" >}}

```dockerfile
#syntax=docker/dockerfile:1

FROM python:latest AS builder

ENV LANG=C.UTF-8
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

WORKDIR /app

RUN python -m venv /app/venv
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

FROM python:latest

WORKDIR /app

ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

COPY image.py image.png ./
COPY --from=builder /app/venv /app/venv

ENTRYPOINT [ "python", "/app/image.py" ]
```

{{< /tab >}}
{{< tab name="After" >}}

```dockerfile
#syntax=docker/dockerfile:1

#=== Build stage: Install dependencies and create virtual environment ===#
FROM <your-namespace>/dhi-python:3.13-alpine3.21-dev AS builder

ENV LANG=C.UTF-8
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

WORKDIR /app

RUN python -m venv /app/venv
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

#=== Final stage: Create minimal runtime image ===#
FROM <your-namespace>/dhi-python:3.13-alpine3.21

WORKDIR /app

ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

COPY image.py image.png ./
COPY --from=builder /app/venv /app/venv

ENTRYPOINT [ "python", "/app/image.py" ]
```

{{< /tab >}}
{{< /tabs >}}

### Use Gordon

Alternatively, you can ask
[Gordon](/manuals/ai/gordon/_index.md), Docker's AI-powered assistant, to migrate your Dockerfile:

{{% include "gordondhi.md" %}}

diff --git a/content/manuals/dhi/how-to/mirror.md b/content/manuals/dhi/how-to/mirror.md
new file mode 100644
index 000000000000..7c9b32eff8d1
--- /dev/null
+++ b/content/manuals/dhi/how-to/mirror.md
@@ -0,0 +1,179 @@
---
title: Mirror a Docker Hardened Image repository
linktitle: Mirror an image
description: Learn how to mirror an image into your organization's namespace and optionally push it to another private registry.
weight: 20
keywords: mirror docker image, private container registry, docker hub automation, webhook image sync, secure image distribution
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Before you can use a Docker Hardened Image (DHI), you must mirror its repository
to your organization. Only organization owners can perform this action. Once
mirrored, the image becomes available in your organization's namespace, and
users with access can begin pulling and using it.

Mirrored repositories automatically stay up to date. Docker continues to sync
new tags and image updates from the upstream DHI catalog, so you always have
access to the latest secure version.

## Prerequisites

- To manage mirroring, you must be an [organization owner](/admin/).
- Your organization must be [signed
  up](https://www.docker.com/products/hardened-images/#getstarted) to use
  Docker Hardened Images.
+ +## Mirror an image repository + +To mirror a Docker Hardened Image repository: + +1. Go to [Docker Hub](https://hub.docker.com) and sign in. +2. Select **My Hub**. +3. In the namespace drop-down, select your organization that has access to DHI. +4. Select **DHI catalog**. +5. Select a DHI repository to view its details. +6. Select **Mirror to repository** and follow the on-screen instructions. + + +It may take a few minutes for all the tags to finish mirroring. Once an image +has been mirrored, the **Mirror to repository** button changes to **View in +repository**. Selecting **View in repository** opens a drop-down list of +repositories that the image has already been mirrored to. From this drop-down, +you can: + + - Select an existing mirrored repository to view its details + - Select **Mirror to repository** again to mirror the image to an additional + repository + +After mirroring a repository, the repository appears in your organization's +repository list under the name you specified, prefixed by `dhi-`. It will +continue to receive updated images. + +![Repository list with mirrored repository showing](../images/dhi-python-mirror.png) + +> [!IMPORTANT] +> +> The mirrored repository's visibility must remain private. Changing its +> visibility to public will stop updates from being mirrored. + +Once mirrored, the image repository works like any other private repository on +Docker Hub. Team members with access to the repository can now pull and use the +image. To learn how to manage access, view tags, or configure settings, see +[Repositories](/manuals/docker-hub/repos.md). + +### Webhook integration for syncing and alerts + +To keep external registries or systems in sync with your mirrored Docker +Hardened Images, and to receive notifications when updates occur, you can +configure a [webhook](/docker-hub/repos/manage/webhooks/) on the mirrored +repository in Docker Hub. A webhook sends a `POST` request to a URL you define +whenever a new image tag is pushed or updated. + +For example, you might configure a webhook to call a CI/CD system at +`https://ci.example.com/hooks/dhi-sync` whenever a new tag is mirrored. The +automation triggered by this webhook can pull the updated image from Docker Hub +and push it to an internal registry such as Amazon ECR, Google Artifact +Registry, or GitHub Container Registry. + +Other common webhook use cases include: + +- Triggering validation or vulnerability scanning workflows +- Signing or promoting images +- Sending notifications to downstream systems + +#### Example webhook payload + +When a webhook is triggered, Docker Hub sends a JSON payload like the following: + +```json +{ + "callback_url": "https://registry.hub.docker.com/u/exampleorg/dhi-python/hook/abc123/", + "push_data": { + "pushed_at": 1712345678, + "pusher": "trustedbuilder", + "tag": "3.13-alpine3.21" + }, + "repository": { + "name": "dhi-python", + "namespace": "exampleorg", + "repo_name": "exampleorg/dhi-python", + "repo_url": "https://hub.docker.com/r/exampleorg/dhi-python", + "is_private": true, + "status": "Active", + ... + } +} +``` + +## Stop mirroring an image repository + +Only organization owners can stop mirroring a repository. After you stop +mirroring, the repository remains, but it will +no longer receive updates. You can still pull the last image that was mirrored, +but the repository will not receive new tags or updates from the original +repository. + + To stop mirroring an image repository: + +1. Go to the mirrored repository in your organization's namespace. +2. 
Select **Stop mirroring**.

Once you have stopped mirroring a repository, you can choose another DHI
repository to mirror.

## Mirror from Docker Hub to another registry

After you've mirrored a Docker Hardened Image repository to your organization's
namespace on Docker Hub, you can optionally mirror it to another container
registry, such as Amazon ECR, Google Artifact Registry, GitHub Container
Registry, or a private Harbor instance.

You can use any standard workflow, including:

- [The Docker CLI](/reference/cli/docker/_index.md)
- [The Docker Hub Registry API](/reference/api/registry/latest/)
- Third-party registry tools or CI/CD automation

The following example shows how to use the Docker CLI to pull a mirrored DHI and
push it to another registry:

```console
# Authenticate to Docker Hub (if not already signed in)
$ docker login

# Pull the image from your organization's namespace on Docker Hub
$ docker pull <your-namespace>/dhi-<image>:<tag>

# Tag the image for your destination registry
$ docker tag <your-namespace>/dhi-<image>:<tag> registry.example.com/my-project/<image>:<tag>

# Push the image to the destination registry
# You will need to authenticate to the third-party registry before pushing
$ docker push registry.example.com/my-project/<image>:<tag>
```

> [!IMPORTANT]
>
> To continue receiving image updates and preserve access to Docker Hardened
> Images, ensure that any copies pushed to other registries remain private.

### Include attestations when mirroring images

Docker Hardened Images are signed and include associated attestations that
provide metadata such as build provenance and vulnerability scan results. These
attestations are stored as OCI artifacts and are not included by default when
using the Docker CLI to mirror images.

To preserve the full security context when copying DHIs to another registry, you
must explicitly include the attestations. One tool that supports
copying both images and their associated artifacts is `regctl`.

For more details on how to use `regctl` to copy images and their associated
artifacts, see the [regclient
documentation](https://regclient.org/cli/regctl/image/copy/).

## What's next

After mirroring an image repository, you can start [using the
image](./use.md).
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/policies.md b/content/manuals/dhi/how-to/policies.md
new file mode 100644
index 000000000000..60cf50f4d99c
--- /dev/null
+++ b/content/manuals/dhi/how-to/policies.md
@@ -0,0 +1,110 @@
---
title: Enforce Docker Hardened Image usage with policies
linktitle: Enforce image usage
description: Learn how to use image policies with Docker Scout for Docker Hardened Images.
weight: 50
keywords: docker scout policies, enforce image compliance, container security policy, image provenance, vulnerability policy check
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Mirroring a Docker Hardened Image (DHI) repository automatically enables [Docker
Scout](/scout/), allowing you to start enforcing security and compliance policies for your
images without additional setup. Using Docker Scout policies, you can define and
apply rules that ensure only approved and secure images, such as those based on
DHIs, are used across your environments.

With policy evaluation built into Docker Scout, you can monitor image compliance
in real time, integrate checks into your CI/CD workflows, and maintain
consistent standards for image security and provenance.

## View existing policies

To see the current policies applied to a mirrored DHI repository:

1. Go to the mirrored DHI repository in [Docker Hub](https://hub.docker.com).
2. Select **View on Scout**.

   This opens the [Docker Scout dashboard](https://scout.docker.com), where you
   can see which policies are currently active and whether your images meet the
   policy criteria.

Docker Scout automatically evaluates policy compliance when new images are
pushed. Each policy includes a compliance result and a link to the affected
images and layers.

## Create policies for your DHI-based images

To ensure that the images you build using Docker Hardened Images remain secure,
you can create Docker Scout policies tailored to your requirements for your own
repositories. These policies help enforce security standards such as preventing
high-severity vulnerabilities, requiring up-to-date base images, or validating
the presence of key metadata.

Policies evaluate images when they are pushed to a repository, allowing you to
track compliance, get notified of deviations, and integrate policy checks into
your CI/CD pipeline.

### Example: Create a policy for DHI-based images

This example shows how to create a policy that requires all images in your
organization to use Docker Hardened Images as their base. This ensures that
your applications are built on secure, minimal, and production-ready images.

#### Step 1: Use a DHI base image in your Dockerfile

Create a Dockerfile that uses a Docker Hardened Image mirrored repository as the
base. For example:

```dockerfile
# Dockerfile
FROM ORG_NAME/dhi-python:3.13-alpine3.21

ENTRYPOINT ["python", "-c", "print('Hello from a DHI-based image')"]
```

#### Step 2: Build and push the image

Open a terminal and navigate to the directory containing your Dockerfile. Then,
build and push the image to your Docker Hub repository:

```console
$ docker build \
  --push \
  -t YOUR_ORG/my-dhi-app:v1 .
```

#### Step 3: Enable Docker Scout

To enable Docker Scout for your organization and the repository, run the
following commands in your terminal:

```console
$ docker login
$ docker scout enroll YOUR_ORG
$ docker scout repo enable --org YOUR_ORG YOUR_ORG/my-dhi-app
```

#### Step 4: Create a policy

1. Go to the [Docker Scout dashboard](https://scout.docker.com).
2. Select your organization and navigate to **Policies**.
3. Select **Add policy**.
4. Select **Configure** for **Approved Base Images Policy**.
5. Give the policy a descriptive name, such as **Approved DHI Base Images**.
6. In **Approved base image sources**, delete the default item.
7. In **Approved base image sources**, add approved base image sources. For this
   example, use the wildcard (`*`) to allow all mirrored DHI repositories,
   `docker.io/ORG_NAME/dhi-*`. Replace `ORG_NAME` with your organization name.
8. Select **Save policy**.

#### Step 5: Evaluate policy compliance

1. Go to the [Docker Scout dashboard](https://scout.docker.com).
2. Select your organization and navigate to **Images**.
3. Find your image, `YOUR_ORG/my-dhi-app:v1`, and select the link in the **Compliance** column.

This shows the policy compliance results for your image, including whether it
meets the requirements of the **Approved DHI Base Images** policy.

You can now [evaluate policy compliance in your CI](/scout/policy/ci/).
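
You can also check compliance from your terminal with the Docker Scout CLI. A
minimal sketch, reusing the image and organization names from this example:

```console
# Show policy evaluation results for the image against your organization's policies
$ docker scout policy YOUR_ORG/my-dhi-app:v1 --org YOUR_ORG
```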
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/scan.md b/content/manuals/dhi/how-to/scan.md
new file mode 100644
index 000000000000..05f8cfc95b5e
--- /dev/null
+++ b/content/manuals/dhi/how-to/scan.md
@@ -0,0 +1,220 @@
---
title: Scan Docker Hardened Images
linktitle: Scan an image
description: Learn how to scan Docker Hardened Images for known vulnerabilities using Docker Scout, Grype, or Trivy.
keywords: scan container image, docker scout cves, grype scanner, trivy container scanner, vex attestation
weight: 45
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Docker Hardened Images (DHIs) are designed to be secure by default, but like any
container image, it's important to scan them regularly as part of your
vulnerability management process.

You can scan DHIs using the same tools you already use for standard images, such
as Docker Scout, Grype, and Trivy. DHIs follow the same formats and standards
for compatibility across your security tooling. Before you scan an image, the image must
be mirrored into your organization on Docker Hub.

> [!NOTE]
>
> [Docker Scout](/manuals/scout/_index.md) is automatically enabled at no
> additional cost for all mirrored Docker Hardened Image repositories on Docker
> Hub. You can view scan results directly in the Docker Hub UI under your
> organization's repository.

## Docker Scout

Docker Scout is integrated into Docker Desktop and the Docker CLI. It provides
vulnerability insights, CVE summaries, and direct links to remediation guidance.

### Scan a DHI using Docker Scout

To scan a Docker Hardened Image using Docker Scout, run the following
command:

```console
$ docker scout cves <your-namespace>/dhi-<image>:<tag> --platform <platform>
```

Example output:

```plaintext
    v SBOM obtained from attestation, 101 packages found
    v Provenance obtained from attestation
    v VEX statements obtained from attestation
    v No vulnerable package detected
...
```

For more detailed filtering and JSON output, see [Docker Scout CLI reference](../../../reference/cli/docker/scout/_index.md).

### Automate DHI scanning in CI/CD with Docker Scout

Integrating Docker Scout into your CI/CD pipeline enables you to automatically
verify that images built from Docker Hardened Images remain free from known
vulnerabilities during the build process. This proactive approach ensures the
continued security integrity of your images throughout the development
lifecycle.

#### Example GitHub Actions workflow

The following is a sample GitHub Actions workflow that builds an image and scans
it using Docker Scout:

```yaml {collapse="true"}
name: DHI Vulnerability Scan

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ "**" ]

env:
  REGISTRY: docker.io
  IMAGE_NAME: ${{ github.repository }}
  SHA: ${{ github.event.pull_request.head.sha || github.event.after }}

jobs:
  scan:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      pull-requests: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build Docker image
        run: |
          docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.SHA }} .

      - name: Run Docker Scout CVE scan
        uses: docker/scout-action@v1
        with:
          command: cves
          image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.SHA }}
          only-severities: critical,high
          exit-code: true
```

The `exit-code: true` parameter ensures that the workflow fails if any critical or
high-severity vulnerabilities are detected, preventing the deployment of
insecure images.

For more details on using Docker Scout in CI, see [Integrating Docker
Scout with other systems](/manuals/scout/integrations/_index.md).

## Grype

[Grype](https://github.com/anchore/grype) is an open-source scanner that checks
container images against vulnerability databases like the NVD and distro
advisories.

### Scan a DHI using Grype

After installing Grype, you can scan a Docker Hardened Image by pulling
the image and running the scan command:

```console
$ docker pull <your-namespace>/dhi-<image>:<tag>
$ grype <your-namespace>/dhi-<image>:<tag>
```

Example output:

```plaintext
NAME         INSTALLED         FIXED-IN     TYPE  VULNERABILITY   SEVERITY  EPSS%  RISK
libperl5.36  5.36.0-7+deb12u2  (won't fix)  deb   CVE-2023-31484  High      79.45  1.1
perl         5.36.0-7+deb12u2  (won't fix)  deb   CVE-2023-31484  High      79.45  1.1
perl-base    5.36.0-7+deb12u2  (won't fix)  deb   CVE-2023-31484  High      79.45  1.1
...
```

You should include the `--vex` flag to apply VEX statements during the scan,
which filter out known non-exploitable CVEs. For more information, see the [VEX
section](#use-vex-to-filter-known-non-exploitable-cves).

## Trivy

[Trivy](https://github.com/aquasecurity/trivy) is an open-source vulnerability
scanner for containers and other artifacts. It detects vulnerabilities in OS
packages and application dependencies.

### Scan a DHI using Trivy

After installing Trivy, you can scan a Docker Hardened Image by pulling
the image and running the scan command:

```console
$ docker pull <your-namespace>/dhi-<image>:<tag>
$ trivy image <your-namespace>/dhi-<image>:<tag>
```

Example output:

```plaintext
Report Summary

┌──────────────────────────────────────────────────────────────────────────────┬────────────┬─────────────────┬─────────┐
│                                    Target                                    │    Type    │ Vulnerabilities │ Secrets │
├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤
│ <your-namespace>/dhi-<image>:<tag> (debian 12.11)                            │   debian   │       66        │    -    │
├──────────────────────────────────────────────────────────────────────────────┼────────────┼─────────────────┼─────────┤
│ opt/python-3.13.4/lib/python3.13/site-packages/pip-25.1.1.dist-info/METADATA │ python-pkg │        0        │    -    │
└──────────────────────────────────────────────────────────────────────────────┴────────────┴─────────────────┴─────────┘
```

You should include the `--vex` flag to apply VEX statements during the scan,
which filter out known non-exploitable CVEs. For more information, see the [VEX
section](#use-vex-to-filter-known-non-exploitable-cves).

## Use VEX to filter known non-exploitable CVEs

Docker Hardened Images include signed VEX (Vulnerability Exploitability
eXchange) attestations that identify vulnerabilities not relevant to the image's
runtime behavior.

When using Docker Scout, these VEX statements are automatically applied and no
manual configuration is needed.

To manually create a VEX attestation JSON file for tools that support it:

```console
$ docker scout attest get \
  --predicate-type https://openvex.dev/ns/v0.2.0 \
  --predicate \
  <your-namespace>/dhi-<image>:<tag> --platform <platform> > vex.json
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://openvex.dev/ns/v0.2.0 \
  --predicate \
  docs/dhi-python:3.13 --platform linux/amd64 > vex.json
```

This creates a `vex.json` file containing the VEX statements for the specified
image. You can then use this file with tools that support VEX to filter out known non-exploitable CVEs.

For example, with Grype and Trivy, you can use the `--vex` flag to apply the VEX
statements during the scan:

```console
$ grype <your-namespace>/dhi-<image>:<tag> --vex vex.json
```
\ No newline at end of file
diff --git a/content/manuals/dhi/how-to/use.md b/content/manuals/dhi/how-to/use.md
new file mode 100644
index 000000000000..93be73141bba
--- /dev/null
+++ b/content/manuals/dhi/how-to/use.md
@@ -0,0 +1,187 @@
---
title: Use a Docker Hardened Image
linktitle: Use an image
description: Learn how to pull, run, and reference Docker Hardened Images in Dockerfiles, CI pipelines, and standard development workflows.
keywords: use hardened image, docker pull secure image, non-root containers, multi-stage dockerfile, dev image variant
weight: 30
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

You can use a Docker Hardened Image (DHI) just like any other image on Docker
Hub. DHIs follow the same familiar usage patterns. Pull them with `docker pull`,
reference them in your Dockerfile, and run containers with `docker run`.

The key difference is that DHIs are security-focused and intentionally minimal
to reduce the attack surface. This means some variants don't include a shell or
package manager, and might run as a nonroot user by default.

> [!NOTE]
>
> You don't need to change your existing workflows. Whether you're pulling
> images manually, referencing them in your Dockerfiles, or integrating them
> into CI pipelines, DHIs work just like the images you already use.

After [mirroring](./mirror.md) a DHI to your organization's namespace, the image
becomes available for use. To find your mirrored repository, go to the original
image's page in the Hardened Images catalog and select **View in repository**
to show a list of mirrored repositories.

## Considerations when adopting DHIs

Docker Hardened Images are intentionally minimal to improve security. If you're updating existing Dockerfiles or frameworks to use DHIs, keep the following considerations in mind:

| Feature | Details |
|--------------------|---------|
| No shell or package manager | Runtime images don't include a shell or package manager. Use `-dev` or `-sdk` variants in build stages to run shell commands or install packages, and then copy artifacts to a minimal runtime image. |
| Non-root runtime | Runtime DHIs default to running as a non-root user. Ensure your application doesn't require privileged access and that all needed files are readable and executable by a non-root user. |
| Ports | Applications running as non-root users can't bind to ports below 1024 in older versions of Docker or in some Kubernetes configurations. Use ports above 1024 for compatibility. |
| Entry point | DHIs might not include a default entrypoint or might use a different one than the original image you're familiar with. Check the image configuration and update your `CMD` or `ENTRYPOINT` directives accordingly. |
| Multi-stage builds | Always use multi-stage builds for frameworks: a `-dev` image for building or installing dependencies, and a minimal runtime image for the final stage. |
| TLS certificates | DHIs include standard TLS certificates. You do not need to manually install CA certificates. |

If you're migrating an existing application, see [Migrate an existing
application to use Docker Hardened Images](./migrate.md).

## Use a DHI in a Dockerfile

To use a DHI as the base image for your container, specify it in the `FROM` instruction in your Dockerfile:

```dockerfile
FROM <your-namespace>/dhi-<image>:<tag>
```

Replace the image name and tag with the variant you want to use. For example,
use a `-dev` tag if you need a shell or package manager during build stages:

```dockerfile
FROM <your-namespace>/dhi-python:3.13-dev AS build
```

To learn how to explore available variants, see [Explore images](./explore.md).

> [!TIP]
>
> Use a multi-stage Dockerfile to separate build and runtime stages, using a
> `-dev` variant in build stages and a minimal runtime image in the final stage.

## Pull a DHI from Docker Hub

Just like any other image on Docker Hub, you can pull Docker Hardened Images
(DHIs) using tools such as the Docker CLI, the Docker Hub Registry API, or
within your CI pipelines.

The following example shows how to pull a DHI using the CLI:

```console
$ docker pull <your-namespace>/dhi-<image>:<tag>
```

You must have access to the image in your Docker Hub namespace. For more
information, see [Mirror a Docker Hardened Image](./mirror.md).

## Run a DHI

After pulling the image, you can run it using `docker run`. For example,
assuming the repository was mirrored to `dhi-python` in your organization
namespace, start a container and run a Python command:

```console
$ docker run --rm <your-namespace>/dhi-python:3.13 python -c "print('Hello from DHI')"
```

## Use a DHI in CI/CD pipelines

Docker Hardened Images work just like any other image in your CI/CD pipelines.
You can reference them in Dockerfiles, pull them as part of a pipeline step, or
run containers based on them during builds and tests.

Unlike typical container images, DHIs also include signed
[attestations](../core-concepts/attestations.md) such as SBOMs and provenance
metadata. You can incorporate these into your pipeline to support supply chain
security, policy checks, or audit requirements if your tooling supports it.

To strengthen your software supply chain, consider adding your own attestations
when building images from DHIs. This lets you document how the image was
built, verify its integrity, and enable downstream validation and [policy
enforcement](./policies.md) using tools like Docker Scout.

To learn how to attach attestations during the build process, see [Docker Build
Attestations](/manuals/build/metadata/attestations.md).

## Use a static image for compiled executables

Docker Hardened Images include a `static` image repository designed specifically
for running compiled executables in an extremely minimal and secure runtime.

Use a `-dev` or other builder image in an earlier stage to compile your binary,
and copy the output into a `static` image.

The following example shows a multi-stage Dockerfile that builds a Go application
and runs it in a minimal static image:

```dockerfile
#syntax=docker/dockerfile:1

FROM <your-namespace>/dhi-golang:1.22-dev AS build
WORKDIR /app
COPY . .
RUN CGO_ENABLED=0 go build -o myapp

FROM <your-namespace>/dhi-static:20230311
COPY --from=build /app/myapp /myapp
ENTRYPOINT ["/myapp"]
```

This pattern ensures a hardened runtime environment with no unnecessary
components, reducing the attack surface to a bare minimum.

## Use dev variants for framework-based applications

If you're building applications with frameworks that require package managers or
build tools (such as Python, Node.js, or Go), use a `-dev` variant during the
development or build stage. These variants include essential utilities like
shells, compilers, and package managers to support local iteration and CI
workflows.

Use `-dev` images in your inner development loop or in isolated CI stages to
maximize productivity. Once you're ready to produce artifacts for production,
switch to a smaller runtime variant to reduce the attack surface and image size.

The following example shows how to build a Python app using a `-dev` variant and
run it using the smaller runtime variant:

```dockerfile
#syntax=docker/dockerfile:1

FROM <your-namespace>/dhi-python:3.13-alpine3.21-dev AS builder

ENV LANG=C.UTF-8
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

WORKDIR /app

RUN python -m venv /app/venv
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

FROM <your-namespace>/dhi-python:3.13-alpine3.21

WORKDIR /app

ENV PYTHONUNBUFFERED=1
ENV PATH="/app/venv/bin:$PATH"

COPY image.py image.png ./
COPY --from=builder /app/venv /app/venv

ENTRYPOINT [ "python", "/app/image.py" ]
```

This pattern separates the build environment from the runtime environment,
helping reduce image size and improve security by removing unnecessary tooling
from the final image.

diff --git a/content/manuals/dhi/how-to/verify.md b/content/manuals/dhi/how-to/verify.md
new file mode 100644
index 000000000000..8b305afcaa81
--- /dev/null
+++ b/content/manuals/dhi/how-to/verify.md
@@ -0,0 +1,187 @@
---
title: Verify a Docker Hardened Image
linktitle: Verify an image
description: Use Docker Scout or cosign to verify signed attestations like SBOMs, provenance, and vulnerability data for Docker Hardened Images.
weight: 40
keywords: verify container image, docker scout attest, cosign verify, sbom validation, signed container attestations
---

{{< summary-bar feature_name="Docker Hardened Images" >}}

Docker Hardened Images (DHI) include signed attestations that verify the image's
build process, contents, and security posture. These attestations are available
for each image variant and can be verified using
[cosign](https://docs.sigstore.dev/) or the Docker Scout CLI.

Docker's public key for DHI images is published at:

- https://registry.scout.docker.com/keyring/dhi/latest.pub
- https://github.com/docker-hardened-images/keyring

## Verify attestations with Docker Scout

You can use the Docker Scout CLI to list and retrieve attestations for Docker
Hardened Images, including images mirrored into your organization's namespace.

> [!NOTE]
>
> Before you run `docker scout attest` commands, ensure any image that you have
> pulled locally is up to date with the remote image. You can do this by running
> `docker pull`. If you don't do this, you might see `No attestation found`.
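
For example, to refresh a local copy before running the attestation commands
that follow (this uses the `docs` namespace that appears in the examples later
on this page):

```console
$ docker pull docs/dhi-python:3.13
```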

### Why use Docker Scout instead of cosign directly?

While you can use cosign to verify attestations manually, the Docker Scout CLI
offers several key advantages when working with Docker Hardened Images:

- Purpose-built experience: Docker Scout understands the structure of DHI
  attestations and image naming conventions, so you don't have to construct full
  image digests or URIs manually.

- Automatic platform resolution: With Scout, you can specify the platform (for
  example, `--platform linux/amd64`), and it automatically verifies the correct
  image variant. Cosign requires you to look up the digest yourself.

- Human-readable summaries: Scout returns summaries of attestation contents
  (for example, package counts and provenance steps), whereas cosign only
  returns raw signature validation output.

- One-step validation: The `--verify` flag in `docker scout attest get` validates
  the attestation and shows the equivalent cosign command, making it easier to
  understand what's happening behind the scenes.

- Integrated with Docker Hub and the DHI trust model: Docker Scout is tightly
  integrated with Docker's attestation infrastructure and public keyring,
  ensuring compatibility and simplifying verification for users within the
  Docker ecosystem.

In short, Docker Scout streamlines the verification process and reduces the chance of human error, while still giving you full visibility and the option to fall back to cosign when needed.

### List available attestations

To list attestations for a mirrored DHI:

```console
$ docker scout attest list <your-namespace>/dhi-<image>:<tag> --platform <platform>
```

This command shows all available attestations, including SBOMs, provenance, vulnerability reports, and more.

### Retrieve a specific attestation

To retrieve a specific attestation, use the `--predicate-type` flag with the full predicate type URI:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  <your-namespace>/dhi-<image>:<tag> --platform <platform>
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  docs/dhi-python:3.13 --platform linux/amd64
```

To retrieve only the predicate body:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  --predicate \
  <your-namespace>/dhi-<image>:<tag> --platform <platform>
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  --predicate \
  docs/dhi-python:3.13 --platform linux/amd64
```

### Validate the attestation with Docker Scout

To validate the attestation using Docker Scout, you can use the `--verify` flag:

```console
$ docker scout attest get <image>:<tag> \
  --predicate-type https://scout.docker.com/sbom/v0.1 --verify
```

For example, to verify the SBOM attestation for the `dhi/node:20.19-debian12-fips-20250701182639` image:

```console
$ docker scout attest get docs/dhi-node:20.19-debian12-fips-20250701182639 \
  --predicate-type https://scout.docker.com/sbom/v0.1 --verify
```

### Show the equivalent cosign command

When using the `--verify` flag, it also prints the corresponding
[cosign](https://docs.sigstore.dev/) command to verify the image signature:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  --verify \
  <your-namespace>/dhi-<image>:<tag> --platform <platform>
```

For example:

```console
$ docker scout attest get \
  --predicate-type https://cyclonedx.org/bom/v1.5 \
  --verify \
  docs/dhi-python:3.13 --platform linux/amd64
```

If verification succeeds, Docker Scout
prints the full `cosign verify` command.

Example output:

```console
    v SBOM obtained from attestation, 101 packages found
    v Provenance obtained from attestation
    v cosign verify registry.scout.docker.com/docker/dhi-python@sha256:b5418da893ada6272add2268573a3d5f595b5c486fb7ec58370a93217a9785ae \
        --key https://registry.scout.docker.com/keyring/dhi/latest.pub --experimental-oci11
...
```

> [!IMPORTANT]
>
> When using cosign, you must first authenticate to both the Docker Hub registry
> and the Docker Scout registry.
>
> For example:
>
> ```console
> $ docker login
> $ docker login registry.scout.docker.com
> $ cosign verify \
>   registry.scout.docker.com/docker/dhi-python@sha256:b5418da893ada6272add2268573a3d5f595b5c486fb7ec58370a93217a9785ae \
>   --key https://registry.scout.docker.com/keyring/dhi/latest.pub --experimental-oci11
> ```

## Available DHI attestations

See [available
attestations](../core-concepts/attestations.md#available-attestations) for a list
of the attestations available for each DHI.

## Explore attestations on Docker Hub

You can also browse attestations visually when [exploring an image
variant](./explore.md#view-image-variant-details). The **Attestations** section
lists each available attestation with its:

- Type (for example, SBOM or VEX)
- Predicate type URI
- Digest reference for use with `cosign`

These attestations are generated and signed automatically as part of the Docker
Hardened Image build process.
\ No newline at end of file
diff --git a/content/manuals/dhi/images/dhi-catalog.png b/content/manuals/dhi/images/dhi-catalog.png
new file mode 100644
index 000000000000..38ffabe91f0a
Binary files /dev/null and b/content/manuals/dhi/images/dhi-catalog.png differ
diff --git a/content/manuals/dhi/images/dhi-mirror-button.png b/content/manuals/dhi/images/dhi-mirror-button.png
new file mode 100644
index 000000000000..a548de20b4c2
Binary files /dev/null and b/content/manuals/dhi/images/dhi-mirror-button.png differ
diff --git a/content/manuals/dhi/images/dhi-mirror-screen.png b/content/manuals/dhi/images/dhi-mirror-screen.png
new file mode 100644
index 000000000000..aaf5fddd5b8f
Binary files /dev/null and b/content/manuals/dhi/images/dhi-mirror-screen.png differ
diff --git a/content/manuals/dhi/images/dhi-python-mirror.png b/content/manuals/dhi/images/dhi-python-mirror.png
new file mode 100644
index 000000000000..9d3252eff9d0
Binary files /dev/null and b/content/manuals/dhi/images/dhi-python-mirror.png differ
diff --git a/content/manuals/dhi/images/dhi-python-search.png b/content/manuals/dhi/images/dhi-python-search.png
new file mode 100644
index 000000000000..ffb58b9e185a
Binary files /dev/null and b/content/manuals/dhi/images/dhi-python-search.png differ
diff --git a/content/manuals/dhi/troubleshoot.md b/content/manuals/dhi/troubleshoot.md
new file mode 100644
index 000000000000..c669283ca902
--- /dev/null
+++ b/content/manuals/dhi/troubleshoot.md
@@ -0,0 +1,81 @@
---
title: Troubleshoot
description: Resolve common issues when building, running, or debugging Docker Hardened Images, such as non-root behavior, missing shells, and port access.
weight: 40
tags: [Troubleshooting]
keywords: troubleshoot hardened image, docker debug container, non-root permission issue, missing shell error, no package manager
---

The following are common issues you might encounter while migrating to or using
Docker Hardened Images (DHIs), along with recommended solutions.
+ +## General debugging + +Docker Hardened Images are optimized for security and runtime performance. As +such, they typically don't include a shell or standard debugging tools. The +recommended way to troubleshoot containers built on DHIs is by using [Docker +Debug](./how-to/debug.md). + +Docker Debug allows you to: + +- Attach a temporary debug container to your existing container. +- Use a shell and familiar tools such as `curl`, `ps`, `netstat`, and `strace`. +- Install additional tools as needed in a writable, ephemeral layer that + disappears after the session. + +## Permissions + +DHIs run as a nonroot user by default for enhanced security. This can result in +permission issues when accessing files or directories. Ensure your application +files and runtime directories are owned by the expected UID/GID or have +appropriate permissions. + +To find out which user a DHI runs as, check the repository page for the image on +Docker Hub. See [View image variant +details](./how-to/explore.md#view-image-variant-details) for more information. + +## Privileged ports + +Nonroot containers cannot bind to ports below 1024 by default. This is enforced +by both the container runtime and the kernel (especially in Kubernetes and +Docker Engine < 20.10). + +Inside the container, configure your application to listen on an unprivileged +port (1025 or higher). For example `docker run -p 80:8080 my-image` maps +port 8080 in the container to port 80 on the host, allowing you to access it +without needing root privileges. + +## No shell + +Runtime DHIs omit interactive shells like `sh` or `bash`. If your build or +tooling assumes a shell is present (e.g., for `RUN` instructions), use a `dev` +variant of the image in an earlier build stage and copy the final artifact into +the runtime image. + +To find out which shell, if any, a DHI has, check the repository page for the +image on Docker Hub. See [View image variant +details](./how-to/explore.md#view-image-variant-details) for more information. + +Also, use [Docker Debug](./how-to/debug.md) when you need shell +access to a running container. + +## Entry point differences + +DHIs may define different entry points compared to Docker Official Images (DOIs) +or other community images. + +To find out the ENTRYPOINT or CMD for a DHI, check the repository page for the +image on Docker Hub. See [View image variant +details](./how-to/explore.md#view-image-variant-details) for more information. + +## No package manager + +Runtime Docker Hardened Images are stripped down for security and minimal attack +surface. As a result, they don't include a package manager such as `apk` or +`apt`. This means you can't install additional software directly in the runtime +image. + +If your build or application setup requires installing packages (for example, to +compile code, install runtime dependencies, or add diagnostic tools), use a `dev` +variant of the image in a build stage. Then, copy only the necessary artifacts +into the final runtime image. \ No newline at end of file diff --git a/content/manuals/docker-hub/_index.md b/content/manuals/docker-hub/_index.md index f1da3b3f5b72..71f8c076729d 100644 --- a/content/manuals/docker-hub/_index.md +++ b/content/manuals/docker-hub/_index.md @@ -32,6 +32,8 @@ grid: description: Find out about new features, improvements, and bug fixes. 
diff --git a/content/manuals/docker-hub/_index.md index f1da3b3f5b72..71f8c076729d 100644 --- a/content/manuals/docker-hub/_index.md +++ b/content/manuals/docker-hub/_index.md @@ -32,6 +32,8 @@ grid: description: Find out about new features, improvements, and bug fixes. icon: note_add link: /docker-hub/release-notes +aliases: + - /docker-hub/overview/ --- Docker Hub simplifies development with the world's largest container registry diff --git a/content/manuals/docker-hub/image-library/catalogs.md index 066ce8c56476..104543ffe457 100644 --- a/content/manuals/docker-hub/image-library/catalogs.md +++ b/content/manuals/docker-hub/image-library/catalogs.md @@ -1,5 +1,5 @@ --- -description: Explore specialized Docker Hub collections like the Generative AI catalog. +description: Explore specialized Docker Hub collections like the generative AI catalogs. keywords: Docker Hub, Hub, catalog title: Docker Hub catalogs linkTitle: Catalogs @@ -19,41 +19,42 @@ Docker Hub: - Accelerate development: Quickly integrate advanced capabilities into your applications without the hassle of extensive research or setup. -The generative AI catalog is the first catalog in Docker Hub, offering -specialized content for AI development. - -## Generative AI catalog - -The [generative AI catalog](https://hub.docker.com/catalogs/gen-ai) makes it -easy to explore and add AI capabilities to your applications. With trusted, -ready-to-use content and comprehensive documentation, you can skip the hassle of -sorting through countless tools and configurations. Instead, focus your time and -energy on creating innovative AI-powered applications. - -The generative AI catalog provides a wide range of trusted content, organized -into key areas to support diverse AI development needs: - -- Demos: Ready-to-deploy examples showcasing generative AI capabilities. These - demos provide a hands-on way to explore AI tools and frameworks, making it - easier to understand how they can be integrated into real-world applications. -- Models: Pre-trained AI models for tasks like text generation, - Natural Language Processing (NLP), and conversational AI. These models - provide a foundation for - AI applications without requiring developers to train models from scratch. -- Applications and end-to-end platforms: Comprehensive platforms and tools that - simplify AI application development, including low-code solutions and - frameworks for building multi-agent and Retrieval-Augmented Generation (RAG) - applications. -- Model deployment and serving: Tools and frameworks that enable developers to - efficiently deploy and serve AI models in production environments. These - resources include pre-configured stacks for GPUs and other specialized - hardware, ensuring performance at scale. -- Orchestration: Solutions for managing complex AI workflows, such as workflow - engines, Large Language Model (LLM) application frameworks, and lifecycle management - tools, to help streamline development and operations. -- Machine learning frameworks: Popular frameworks like TensorFlow and PyTorch - that provide the building blocks for creating, training, and fine-tuning - machine learning models. -- Databases: Databases optimized for AI workloads, including vector databases - for similarity search, time-series databases for analytics, and NoSQL - solutions for handling unstructured data. \ No newline at end of file +The following sections provide an overview of the key catalogs available in Docker Hub. + +## MCP Catalog + +The [MCP Catalog](https://hub.docker.com/mcp/) is a centralized, trusted +registry for discovering, sharing, and running Model Context Protocol +(MCP)-compatible tools.
Seamlessly integrated into Docker Hub, the catalog +includes: + +- Over 100 verified MCP servers packaged as Docker images +- Tools from partners such as New Relic, Stripe, and Grafana +- Versioned releases with publisher verification +- Simplified pull-and-run support through Docker Desktop and Docker CLI + +Each server runs in an isolated container to ensure consistent behavior and +minimize configuration headaches. For developers working with Claude Desktop or +other MCP clients, the catalog provides an easy way to extend functionality with +drop-in tools. + +To learn more about MCP servers, see [MCP Catalog and Toolkit](../../ai/mcp-catalog-and-toolkit/_index.md). + +## AI Models Catalog + +The [AI Models Catalog](https://hub.docker.com/catalogs/models/) provides +curated, trusted models that work with [Docker Model +Runner](../../ai/model-runner/_index.md). This catalog is designed to make AI +development more accessible by offering pre-packaged, ready-to-use models that +you can pull, run, and interact with using familiar Docker tools. + +With the AI Models Catalog and Docker Model Runner, you can: + +- Pull and serve models from Docker Hub or any OCI-compliant registry +- Interact with models via OpenAI-compatible APIs +- Run and test models locally using Docker Desktop or CLI +- Package and publish models using the `docker model` CLI + +Whether you're building generative AI applications, integrating LLMs into your +workflows, or experimenting with machine learning tools, the AI Models Catalog +simplifies the model management experience.
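As a quick illustration, pulling a model and running a one-off prompt with Docker Model Runner might look like the following. The model name is illustrative; any model from the catalog works the same way:

```console
$ docker model pull ai/smollm2
$ docker model run ai/smollm2 "Summarize what an OCI artifact is."
```

The model is served locally, and the same model can also be reached through the OpenAI-compatible APIs mentioned above.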
diff --git a/content/manuals/docker-hub/image-library/mirror.md index 94a87e866b3c..9ab5fdf7b4e6 100644 --- a/content/manuals/docker-hub/image-library/mirror.md +++ b/content/manuals/docker-hub/image-library/mirror.md @@ -45,6 +45,21 @@ Hub can be mirrored. The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. +### Using Registry Access Management (RAM) with a registry mirror + +If Docker Hub access is restricted by your Registry Access Management (RAM) configuration, you can't pull images originating from Docker Hub, even if the images are available in your registry mirror. + +You'll see the following error: +```console +Error response from daemon: Access to docker.io has been restricted by your administrators. +``` + +If you can't allow access to Docker Hub, you can manually pull from your registry mirror and, optionally, retag the image. For example: +```console +docker pull <registry-mirror-host>[:<port>]/library/busybox +docker tag <registry-mirror-host>[:<port>]/library/busybox:latest busybox:latest +``` + ## How does it work? The first time you request an image from your local registry mirror, it pulls @@ -79,10 +94,6 @@ Multiple registry caches can be deployed over the same back-end. A single registry cache ensures that concurrent requests do not pull duplicate data, but this property does not hold true for a registry cache cluster. -> [!NOTE] -> -> When using Docker Hub, all paid Docker subscriptions are limited to 5000 pulls per day. If you require a higher number of pulls, you can purchase an Enhanced Service Account add-on. See [Service Accounts](/docker-hub/service-accounts/) for more details.
- ### Configure the cache To configure a Registry to run as a pull through cache, the addition of a diff --git a/content/manuals/docker-hub/image-library/search.md index fff381ae49da..a78f57a39c68 100644 --- a/content/manuals/docker-hub/image-library/search.md +++ b/content/manuals/docker-hub/image-library/search.md @@ -104,7 +104,7 @@ Categories group images based on their primary use case, helping you quickly locate the tools and resources you need to build, deploy, and run your applications. -{{< include "hub-categories.md" >}} +{{% include "hub-categories.md" %}} ### Operating systems diff --git a/content/manuals/docker-hub/image-library/trusted-content.md index 518ccfce6dbb..1150abc46f83 100644 --- a/content/manuals/docker-hub/image-library/trusted-content.md +++ b/content/manuals/docker-hub/image-library/trusted-content.md @@ -137,7 +137,7 @@ Docker Hub for examples on how to install packages if you are unfamiliar. ### Codenames Tags with words that look like Toy Story characters (for example, `bookworm`, -`bullseye`, and `trixie`) or adjectives (such as `focal`, `jammy`, and +`bullseye`, and `trixie`) or adjectives (such as `jammy` and +`noble`), indicate the codename of the Linux distribution they use as a base image. Debian release codenames are [based on Toy Story characters](https://en.wikipedia.org/wiki/Debian_version_history#Naming_convention), and Ubuntu's take the form of "Adjective Animal". For example, the diff --git a/content/manuals/docker-hub/quickstart.md index 6d1f1d29fd0e..c3b646d0ead4 100644 --- a/content/manuals/docker-hub/quickstart.md +++ b/content/manuals/docker-hub/quickstart.md @@ -115,7 +115,7 @@ You can run images from Docker Hub using the CLI or Docker Desktop Dashboard. The container logs appear after the container starts. 5. Select the **8080:80** link to open the server, or visit - [https://localhost:8080](https://localhost:8080) in your web browser. + [http://localhost:8080](http://localhost:8080) in your web browser. 6. In the Docker Desktop Dashboard, select the **Stop** button to stop the container. @@ -174,7 +174,7 @@ You can run images from Docker Hub using the CLI or Docker Desktop Dashboard. ... ``` -3. Visit [https://localhost:8080](https://localhost:8080) to view the default +3. Visit [http://localhost:8080](http://localhost:8080) to view the default Nginx page and verify that the container is running. 4. In the terminal, press Ctrl+C to stop the container. @@ -241,7 +241,7 @@ customize your own images to suit specific needs. $ docker run -p 8080:80 --rm <your-username>/nginx-custom ``` -4. Visit [https://localhost:8080](https://localhost:8080) to view the page. You +4. Visit [http://localhost:8080](http://localhost:8080) to view the page. You should see `Hello world from Docker!`. 5. In the terminal, press CTRL+C to stop the container. @@ -323,4 +323,3 @@ these options. Add [repository information](./repos/manage/information.md) to help users find and use your image. - diff --git a/content/manuals/docker-hub/release-notes.md index 18efecb5d33a..d93b4ad0e06f 100644 --- a/content/manuals/docker-hub/release-notes.md +++ b/content/manuals/docker-hub/release-notes.md @@ -13,7 +13,11 @@ tags: [Release notes] Here you can learn about the latest changes, new features, bug fixes, and known issues for each Docker Hub release.
-Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projects/51/views/1?filterQuery=) to see what's coming next. +## 2025-02-18 + +### New + +- You can delete images and image indexes using [Image Management](./repos/manage/hub-images/manage.md). ## 2024-12-12 @@ -80,7 +84,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec ### Bug fixes and enhancements -- In Docker Hub, you can now download a [registry.json](../security/for-admins/enforce-sign-in/_index.md) file or copy the commands to create a registry.json file to enforce sign-in for your organization. +- In Docker Hub, you can now download a [registry.json](/manuals/enterprise/security/enforce-sign-in/_index.md) file or copy the commands to create a registry.json file to enforce sign-in for your organization. ## 2022-09-19 @@ -110,7 +114,7 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec ### New -- [Registry Access Management](/manuals/security/for-admins/hardened-desktop/registry-access-management.md) is now available for all Docker Business subscriptions. When enabled, your users can access specific registries in Docker Hub. +- [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) is now available for all Docker Business subscriptions. When enabled, your users can access specific registries in Docker Hub. ## 2022-05-03 @@ -260,7 +264,7 @@ Each organization page now breaks down into these tabs: ### New features -* You can now [create personal access tokens](/security/for-developers/access-tokens/) in Docker Hub and use them to authenticate from the Docker CLI. Find them in your account settings, under the new **[Security](https://hub.docker.com/settings/security)** section. +* You can now [create personal access tokens](/security/access-tokens/) in Docker Hub and use them to authenticate from the Docker CLI. Find them in your account settings, under the new **[Security](https://hub.docker.com/settings/security)** section. ### Known Issues @@ -277,7 +281,6 @@ Each organization page now breaks down into these tabs: * Scan results don't appear for some official images. - ## 2019-09-05 ### Enhancements diff --git a/content/manuals/docker-hub/repos/archive.md b/content/manuals/docker-hub/repos/archive.md index 18c89bfccc27..079059b598aa 100644 --- a/content/manuals/docker-hub/repos/archive.md +++ b/content/manuals/docker-hub/repos/archive.md @@ -35,7 +35,7 @@ unarchived, the following occurs: ## Archive a repository 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -51,7 +51,7 @@ unarchived, the following occurs: ## Unarchive a repository 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. diff --git a/content/manuals/docker-hub/repos/create.md b/content/manuals/docker-hub/repos/create.md index 3cc4d97c826a..9e20df6710c0 100644 --- a/content/manuals/docker-hub/repos/create.md +++ b/content/manuals/docker-hub/repos/create.md @@ -8,7 +8,7 @@ weight: 20 --- 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. 3. Near the top-right corner, select **Create repository**. 4. Select a **Namespace**. @@ -39,7 +39,7 @@ weight: 20 is only accessible to you and collaborators. 
In addition, if you selected an organization's namespace, then the repository is accessible to those with applicable roles or permissions. For more details, see [Roles and - permissions](../../security/for-admins/roles-and-permissions.md). + permissions](/manuals/enterprise/security/roles-and-permissions.md). > [!NOTE] > diff --git a/content/manuals/docker-hub/repos/delete.md index 46e27c1ea2e9..3de3414547b4 100644 --- a/content/manuals/docker-hub/repos/delete.md +++ b/content/manuals/docker-hub/repos/delete.md @@ -13,7 +13,7 @@ weight: 40 > settings. This action can't be undone. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. diff --git a/content/manuals/docker-hub/repos/manage/access.md index ed190257f327..b9fee8c042ad 100644 --- a/content/manuals/docker-hub/repos/manage/access.md +++ b/content/manuals/docker-hub/repos/manage/access.md @@ -39,7 +39,7 @@ change the visibility after the repository has been created. To change repository visibility: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. 3. Select a repository. The **General** page for the repository appears. @@ -75,7 +75,7 @@ teams, or organization access tokens to manage access. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -94,7 +94,7 @@ repository from that repository's **Settings** page. Organizations can use roles for individuals, giving them different permissions in the organization. For more details, see [Roles and -permissions](/manuals/security/for-admins/roles-and-permissions.md). +permissions](/manuals/enterprise/security/roles-and-permissions.md). ## Organization teams @@ -111,7 +111,7 @@ To configure team repository permissions: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -131,4 +131,45 @@ To configure team repository permissions: Organizations can use OATs. OATs let you assign fine-grained repository access permissions to tokens. For more details, see [Organization access -tokens](/manuals/security/for-admins/access-tokens.md). +tokens](/manuals/enterprise/security/access-tokens.md). + +## Gated distribution + +{{< summary-bar feature_name="Gated distribution" >}} + +Gated distribution lets publishers securely share private container images with external customers or partners, without giving them full organization access or visibility into your teams, collaborators, or other repositories. + +This feature is ideal for commercial software publishers who want to control who can pull specific images while preserving a clean separation between internal users and external consumers. + +If you're interested in gated distribution, contact the [Docker Sales Team](https://www.docker.com/pricing/contact-sales/) for more information. + +### Key features + +- **Private repository distribution**: Content is stored in private repositories and only accessible to explicitly invited users. + +- **External access without organization membership**: External users don't need to be added to your internal organization to pull images.
+ +- **Pull-only permissions**: External users receive pull-only access and cannot push or modify repository content. + +- **Invite-only access**: Access is granted through authenticated email invites, managed via API. + +### Invite distributor members via API + +> [!NOTE] +> When you invite members, you assign them a role. See [Roles and permissions](/manuals/enterprise/security/roles-and-permissions.md) for details about the access permissions for each role. + +Distributor members (used for gated distribution) can only be invited using the Docker Hub API. UI-based invitations are not currently supported for this role. To invite distributor members, use the Bulk create invites API endpoint. + +To invite distributor members: + +1. Use the [Authentication API](https://docs.docker.com/reference/api/hub/latest/#tag/authentication-api/operation/AuthCreateAccessToken) to generate a bearer token for your Docker Hub account. + +2. Create a team in the Hub UI or use the [Teams API](https://docs.docker.com/reference/api/hub/latest/#tag/groups/paths/~1v2~1orgs~1%7Borg_name%7D~1groups/post). + +3. Grant repository access to the team: + - In the Hub UI: Navigate to your repository settings and add the team with "Read-only" permissions. + - Using the [Repository Teams API](https://docs.docker.com/reference/api/hub/latest/#tag/repositories/paths/~1v2~1repositories~1%7Bnamespace%7D~1%7Brepository%7D~1groups/post): Assign the team to your repositories with "read-only" access level. + +4. Use the [Bulk create invites endpoint](https://docs.docker.com/reference/api/hub/latest/#tag/invites/paths/~1v2~1invites~1bulk/post) to send email invites with the distributor member role. In the request body, set the `role` field to `distributor_member`. + +5. The invited user receives an email with a link to accept the invite. After signing in with their Docker ID, they're granted pull-only access to the specified private repository as a distributor member.
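For step 4, the request is a single `POST` with your bearer token. The sketch below shows the general shape only: the endpoint path and the `role` value come from the reference above, but the other field names are assumptions here, so confirm the exact request schema in the Bulk create invites reference before using it:

```console
$ curl -X POST "https://hub.docker.com/v2/invites/bulk" \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
        "org": "<your-org>",
        "team": "<your-team>",
        "invitees": ["partner@example.com"],
        "role": "distributor_member"
      }'
```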
diff --git a/content/manuals/docker-hub/repos/manage/builds/automated-testing.md index e01bc9cce4a7..79d381757cf6 100644 --- a/content/manuals/docker-hub/repos/manage/builds/automated-testing.md +++ b/content/manuals/docker-hub/repos/manage/builds/automated-testing.md @@ -73,7 +73,7 @@ branch or tag, and you do not need to enable it at all. Only branches that are configured to use autobuild push images to the Docker repository, regardless of the Autotest settings. -1. Sign in to Docker Hub and select **Repositories**. +1. Sign in to Docker Hub and select **My Hub** > **Repositories**. 2. Select the repository you want to enable `Autotest` on. diff --git a/content/manuals/docker-hub/repos/manage/builds/images/docker-hub-repo-settings-menu.png b/content/manuals/docker-hub/repos/manage/builds/images/docker-hub-repo-settings-menu.png deleted file mode 100644 index 727359beda4e..000000000000 Binary files a/content/manuals/docker-hub/repos/manage/builds/images/docker-hub-repo-settings-menu.png and /dev/null differ diff --git a/content/manuals/docker-hub/repos/manage/builds/link-source.md index 026e3872e874..61e270190cc7 100644 --- a/content/manuals/docker-hub/repos/manage/builds/link-source.md +++ b/content/manuals/docker-hub/repos/manage/builds/link-source.md @@ -27,12 +27,9 @@ If you are linking a source code provider to create autobuilds for a team, follo 1. Sign in to Docker Hub. -2. Select the **Settings** icon in the top-right navigation, then select **Repository Settings**. +2. Select **My Hub** > **Settings** > **Linked accounts**. - ![Repository settings menu](./images/docker-hub-repo-settings-menu.png) - - -3. From the **Linked accounts** tab, select **Link provider** for the source provider you want to link. +3. Select **Link provider** for the source provider you want to link. If you want to unlink your current GitHub account and relink to a new GitHub account, make sure to completely sign out of [GitHub](https://github.com/) before linking via Docker Hub. @@ -102,9 +99,9 @@ To revoke Docker Hub's access to an organization's GitHub repositories: To revoke Docker Hub's access to your GitHub account, you must unlink it both from Docker Hub, and from your GitHub account. -1. Select the **Settings** icon in the top-right navigation, then select **Repository Settings**. +1. Select **My Hub** > **Settings** > **Linked accounts**. -2. From the **Linked accounts** tab, select the plug icon next to the source provider you want to remove. +2. Select **Unlink provider** next to the source provider you want to remove. 3. Go to your GitHub account's **Settings** page. @@ -123,9 +120,9 @@ code provider. 1. Sign in to Docker Hub using your Docker ID. -2. Select the **Settings** icon in the top-right navigation, then select **Repository Settings**. +2. Select **My Hub** > **Settings** > **Linked accounts**. -3. From the **Linked accounts** tab, select **Link provider** for the source provider you want to link. +3. Select **Link provider** for the source provider you want to link. 4. If necessary, sign in to Bitbucket. @@ -138,9 +135,9 @@ unlink it both from Docker Hub, and revoke authorization in your Bitbucket accou 1. Sign in to Docker Hub. -2. Select the **Settings** icon in the top-right navigation, then select **Repository Settings**. +2. Select **My Hub** > **Settings** > **Linked accounts**. -3. From the **Linked accounts** tab, select the **Plug** icon next to the source provider you want to remove. +3. Select **Unlink provider** next to the source provider you want to remove. > [!IMPORTANT] > After unlinking the account on Docker Hub, you must also revoke the authorization on the Bitbucket end. diff --git a/content/manuals/docker-hub/repos/manage/builds/manage-builds.md index c199e918a6f8..d1951da03592 100644 --- a/content/manuals/docker-hub/repos/manage/builds/manage-builds.md +++ b/content/manuals/docker-hub/repos/manage/builds/manage-builds.md @@ -48,7 +48,7 @@ a while, for example when you are doing major refactoring in your code. Disablin To disable an automated build: -1. From the **Repositories** page, select a repository, and select the **Builds** tab. +1. In [Docker Hub](https://hub.docker.com), go to **My Hub** > **Repositories**, select a repository, and select the **Builds** tab. 2. Select **Configure automated builds** to edit the repository's build settings. diff --git a/content/manuals/docker-hub/repos/manage/builds/setup.md index 34e12314e87f..38a97f360eba 100644 --- a/content/manuals/docker-hub/repos/manage/builds/setup.md +++ b/content/manuals/docker-hub/repos/manage/builds/setup.md @@ -22,7 +22,7 @@ build an image each time you push new code to your source provider. If you have [automated tests](automated-testing.md) configured, the new image is only pushed when the tests succeed. -1.
From the **Repositories** section, select a repository to view its details. +1. In [Docker Hub](https://hub.docker.com), go to **My Hub** > **Repositories**, and select a repository to view its details. 2. Select the **Builds** tab. @@ -150,7 +150,7 @@ destination Docker tag to set up an automated build. You can also: create dynamic tags All of these options are available from the **Build configuration** screen for -each repository. Select **Repositories** from the left navigation, and select the name of the repository you want to edit. Select the **Builds** tab, and then select **Configure Automated builds**. +each repository. In [Docker Hub](https://hub.docker.com), select **My Hub** > **Repositories**, and select the name of the repository you want to edit. Select the **Builds** tab, and then select **Configure Automated builds**. ### Tag and branch builds diff --git a/content/manuals/docker-hub/repos/manage/hub-images/_index.md b/content/manuals/docker-hub/repos/manage/hub-images/_index.md index 726eb7889254..7d522bdc00ce 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/_index.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/_index.md @@ -12,9 +12,12 @@ version-controlled, and easy to share. This section covers key image management tasks, including tagging, pushing images, transferring images between repositories, and supported software artifacts. + - [Tags](./tags.md): Tags help you version and organize different iterations of your images within a single repository. This topic explains tagging and provides guidance on how to create, view, and delete tags in Docker Hub. +- [Image Management](./manage.md): Manage your images and image indexes to + optimize your repository storage. - [Software artifacts](./oci-artifacts.md): Docker Hub supports OCI (Open Container Initiative) artifacts, allowing you to store, manage, and distribute a range of content beyond standard Docker images, including Helm charts, diff --git a/content/manuals/docker-hub/repos/manage/hub-images/images/image-index.svg b/content/manuals/docker-hub/repos/manage/hub-images/images/image-index.svg new file mode 100644 index 000000000000..321721c245ac --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/hub-images/images/image-index.svg @@ -0,0 +1,2 @@ +Image indexImage indexImageImageImageImageImageImageImageImageImage \ No newline at end of file diff --git a/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md b/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md new file mode 100644 index 000000000000..d01fee36c019 --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/hub-images/immutable-tags.md @@ -0,0 +1,60 @@ +--- +description: Learn about immutable tags and how they help maintain image version consistency on Docker Hub. +keywords: Docker Hub, Hub, repository content, tags, immutable tags, version control +title: Immutable tags on Docker Hub +linkTitle: Immutable tags +weight: 11 +--- +{{< summary-bar feature_name="Immutable tags" >}} + +Immutable tags provide a way to ensure that specific image versions remain unchanged once they are published to Docker Hub. This feature helps maintain consistency and reliability in your container deployments by preventing accidental overwrites of important image versions. + +## What are immutable tags? + +Immutable tags are image tags that, once pushed to Docker Hub, cannot be overwritten or deleted. 
This ensures that a specific version of an image remains exactly the same throughout its lifecycle, providing: + +- Version consistency +- Reproducible builds +- Protection against accidental overwrites +- Better security and compliance + +## Enable immutable tags + +To enable immutable tags for your repository: + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub** > **Repositories**. +3. Select the repository where you want to enable immutable tags. +4. Go to **Settings** > **General**. +5. Under **Tag mutability settings**, select one of the following options: + - **All tags are mutable (Default)**: + Tags can be changed to reference a different image. This lets you retarget a tag without creating a new one. + - **All tags are immutable**: + Tags cannot be updated to point to a different image after creation. This ensures consistency and prevents accidental changes. This includes the `latest` tag. + - **Specific tags are immutable**: + Define specific tags that cannot be updated after creation using regex values. +6. Select **Save**. + +Once enabled, immutable tags are locked to their specific images, ensuring that each of those tags always points to the same image version and can't be modified. + +> [!NOTE] +> This implementation of regular expressions follows the [Go regexp package](https://pkg.go.dev/regexp), which is based on the RE2 engine. For more information, visit [RE2 Regular Expression Syntax](https://github.com/google/re2/wiki/Syntax).
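For example, with **Specific tags are immutable** you could lock release tags while leaving development tags mutable. The following patterns are a sketch; adapt them to your own tagging scheme:

```text
^v\d+\.\d+\.\d+$
^release-.*$
```

The first pattern locks semantic version tags such as `v1.2.3`, and the second locks any tag that starts with `release-`.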
+ +## Working with immutable tags + +When immutable tags are enabled: + +- You cannot push a new image with the same tag name +- You must use a new tag name for each new image version + +To push an image, create a new tag for your updated image and push it to the repository. + + + + + + + + + diff --git a/content/manuals/docker-hub/repos/manage/hub-images/manage.md b/content/manuals/docker-hub/repos/manage/hub-images/manage.md new file mode 100644 index 000000000000..f69c1cca9362 --- /dev/null +++ b/content/manuals/docker-hub/repos/manage/hub-images/manage.md @@ -0,0 +1,57 @@ +--- +description: Discover how to delete image tags. +keywords: Docker Hub, Hub, tags, delete +title: Image Management +linktitle: Image Management +weight: 12 +--- + +{{< summary-bar feature_name="Image management" >}} + +Images and image indexes are the foundation of container images within a +repository. The following diagram shows the relationship between images and +image indexes. + ![Diagram showing how an image index references multiple architecture-specific images.](./images/image-index.svg) + +This structure enables multi-architecture support through a single reference. +Note that images aren't always referenced by an image index. The following +objects are shown in the diagram. + +- Image index: An image that points to multiple architecture-specific images + (like AMD and ARM), letting a single reference work across different + platforms. +- Image: Individual container images that contain the actual configuration and + layers for a specific architecture and operating system. + +## Manage repository images and image indexes + +1. Sign in to [Docker Hub](https://hub.docker.com). +2. Select **My Hub** > **Repositories**. +3. In the list, select a repository. +4. Select **Image Management**. +5. Search, filter, or sort the items. + - Search: In the search box above the list, specify your search. + - Filter: In the **Filter by** drop-down, select **Tagged**, **Image index**, + or **Image**. + - Sort: Select the column title for **Size**, **Last pushed**, or **Last + pulled**. + + > [!NOTE] + > + > Images that haven't been pulled in over 6 months are marked as **Stale** in + > the **Status** column. + +6. Optional. Delete one or more items. + 1. Select the checkboxes next to the items in the list. Selecting any + top-level index also removes any underlying images that aren't referenced + elsewhere. + 2. Select **Preview and delete**. + 3. In the window that appears, verify the items that will be deleted and the + amount of storage you'll reclaim. + 4. Select **Delete forever**. + + > [!NOTE] + > + > To delete in bulk, use the [deletion API endpoint](/reference/api/registry/latest/#tag/delete). diff --git a/content/manuals/docker-hub/repos/manage/hub-images/move.md index fc267f3a8e62..aa1ebd6273ba 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/move.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/move.md @@ -42,7 +42,7 @@ The private images that existed in your previous account are now available in yo To avoid losing your private images, you can pull your private images from your personal account and push them to an organization that's owned by you. -1. Navigate to [Docker Hub](https://hub.docker.com) and select **Organizations**. +1. Navigate to [Docker Hub](https://hub.docker.com) and select **My Hub**. 2. Select the applicable organization and verify that your user account is a member of the organization. 3. Sign in to [Docker Hub](https://hub.docker.com) using your original Docker account, and pull your images: diff --git a/content/manuals/docker-hub/repos/manage/hub-images/tags.md index 35db6a1866a8..6768ce9d782b 100644 --- a/content/manuals/docker-hub/repos/manage/hub-images/tags.md +++ b/content/manuals/docker-hub/repos/manage/hub-images/tags.md @@ -33,7 +33,7 @@ The image is then uploaded and available for use in Docker Hub. You can view the available tags and the size of the associated image. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -51,7 +51,7 @@ Only the repository owner or other team members with granted permissions can delete tags. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. diff --git a/content/manuals/docker-hub/repos/manage/information.md index 06f494da5a41..8be904423ef3 100644 --- a/content/manuals/docker-hub/repos/manage/information.md +++ b/content/manuals/docker-hub/repos/manage/information.md @@ -46,7 +46,7 @@ effective in search results, driving more relevant traffic to your repository. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -86,7 +86,7 @@ Consider the following repository overview best practices. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -114,7 +114,7 @@ explore content for the problem domain that they're interested in. The Docker Hub content team maintains a curated list of categories.
-{{< include "hub-categories.md" >}} +{{% include "hub-categories.md" %}} ### Auto-generated categories @@ -142,7 +142,7 @@ You can tag a repository with up to three categories. To edit the categories of a repository: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/_index.md b/content/manuals/docker-hub/repos/manage/trusted-content/_index.md index 3b29c8873da4..0ae04840e94a 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/_index.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/_index.md @@ -3,6 +3,8 @@ description: Learn how to manage and contribute to trusted content. keywords: Docker Hub, Hub, trusted content title: Trusted content weight: 100 +aliases: + - /trusted-content/ --- Docker's trusted content programs ensure that container images meet the highest diff --git a/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md b/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md index 0ffd17abcb5c..078fda8f0288 100644 --- a/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md +++ b/content/manuals/docker-hub/repos/manage/trusted-content/insights-analytics.md @@ -61,7 +61,7 @@ You can download extension CSV reports from the **Insights and analytics** dashb ## Exporting analytics data You can export the analytics data either from the web dashboard, or using the -[DVP Data API](/reference/api/hub/dvp.md). All members of an organization have access to the analytics data. +[DVP Data API](/reference/api/dvp/latest.md). All members of an organization have access to the analytics data. The data is available as a downloadable CSV file, in a weekly (Monday through Sunday) or monthly format. Monthly data is available from the first day of the @@ -72,9 +72,9 @@ can analyze it manually as a spreadsheet. Export usage data for your organization's images using the Docker Hub website by following these steps: -1. Sign in to [Docker Hub](https://hub.docker.com/) and select **Organizations**. +1. Sign in to [Docker Hub](https://hub.docker.com/) and select **My Hub**. -2. Choose your organization and select **Insights and analytics**. +2. Choose your organization and select **Analytics**. ![Organization overview page, with the Insights and Analytics tab](../../../images/organization-tabs.png) @@ -89,7 +89,7 @@ Export usage data for your organization's images using the Docker Hub website by The HTTP API endpoints are available at: `https://hub.docker.com/api/publisher/analytics/v1`. Learn how to export data -using the API in the [DVP Data API documentation](/reference/api/hub/dvp.md). +using the API in the [DVP Data API documentation](/reference/api/dvp/latest.md). ## Data points diff --git a/content/manuals/docker-hub/repos/manage/vulnerability-scanning.md b/content/manuals/docker-hub/repos/manage/vulnerability-scanning.md index e9d14d3c8921..9a21813daa46 100644 --- a/content/manuals/docker-hub/repos/manage/vulnerability-scanning.md +++ b/content/manuals/docker-hub/repos/manage/vulnerability-scanning.md @@ -26,7 +26,7 @@ see [Docker Scout](/manuals/scout/_index.md). ### Turn on Docker Scout image analysis 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. 
@@ -41,7 +41,7 @@ see [Docker Scout](/manuals/scout/_index.md). ### Turn off Docker Scout image analysis 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -108,7 +108,7 @@ To enable static vulnerability scanning: > architecture, Linux OS, and are less than 10 GB in size. 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -130,7 +130,7 @@ repository for which you have turned on scanning. To view the vulnerability report: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. @@ -191,7 +191,7 @@ Repository owners and administrators can disable static vulnerability scanning on a repository. To disable scanning: 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. +2. Select **My Hub** > **Repositories**. A list of your repositories appears. diff --git a/content/manuals/docker-hub/repos/settings.md b/content/manuals/docker-hub/repos/settings.md index 279295d458fb..3b0f917d1eb3 100644 --- a/content/manuals/docker-hub/repos/settings.md +++ b/content/manuals/docker-hub/repos/settings.md @@ -22,9 +22,8 @@ creates the repository with your default repository privacy. ### Configure default repository privacy 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. -3. Near the top-right corner, select the settings icon and then **Repository Settings**. -4. Select the **Default privacy** for any new repository created. +2. Select **My Hub** > **Settings** > **Default privacy**. +3. Select the **Default privacy** for any new repository created. - **Public**: All new repositories appear in Docker Hub search results and can be pulled by everyone. @@ -33,7 +32,7 @@ creates the repository with your default repository privacy. repository is created in an organization's namespace, then the repository is accessible to those with applicable roles or permissions. -5. Select **Save**. +4. Select **Save**. ## Autobuild notifications @@ -43,13 +42,11 @@ autobuilds. ### Configure autobuild notifications 1. Sign in to [Docker Hub](https://hub.docker.com). -2. Select **Repositories**. -3. Near the top-right corner, select the settings icon and then **Repository Settings**. -4. Select the **Notifications** -5. Select the notifications to receive by email. +2. Select **My Hub** > **Repositories** > **Settings** > **Notifications**. +3. Select the notifications to receive by email. - **Off**: No notifications. - **Only failures**: Only notifications about failed builds. - **Everything**: Notifications for successful and failed builds. -6. Select **Save**. +4. Select **Save**. diff --git a/content/manuals/docker-hub/service-accounts.md b/content/manuals/docker-hub/service-accounts.md index 75d2390a2876..7d3150756590 100644 --- a/content/manuals/docker-hub/service-accounts.md +++ b/content/manuals/docker-hub/service-accounts.md @@ -5,7 +5,7 @@ title: Service accounts weight: 50 --- -{{< include "new-plans.md" >}} +{{% include "new-plans.md" %}} > [!IMPORTANT] > @@ -13,10 +13,10 @@ weight: 50 > available. Existing Service Account agreements will be honored until their > current term expires, but new purchases or renewals of Enhanced Service > Account add-ons are no longer available and customers must renew under a new -> subscription plan. 
+> subscription. > > Docker recommends transitioning to [Organization Access Tokens -> (OATs)](../security/for-admins/access-tokens.md), which can provide similar +> functionality. A service account is a Docker ID used for automated management of container images or containerized applications. Service accounts are typically used in automated workflows, and don't share Docker IDs with the members in the organization. Common use cases for service accounts include mirroring content on Docker Hub, or tying in image pulls from your CI/CD process. diff --git a/content/manuals/docker-hub/troubleshoot.md b/content/manuals/docker-hub/troubleshoot.md new file mode 100644 index 000000000000..e0976f250d44 --- /dev/null +++ b/content/manuals/docker-hub/troubleshoot.md @@ -0,0 +1,82 @@ +--- +description: Learn how to troubleshoot common Docker Hub issues. +keywords: hub, troubleshoot +title: Troubleshoot Docker Hub +linkTitle: Troubleshoot +weight: 60 +tags: [Troubleshooting] +toc_max: 2 +--- + +If you experience issues with Docker Hub, refer to the following solutions. + +## You have reached your pull rate limit (429 response code) + +### Error message + +When this issue occurs, you receive the following error message in the Docker CLI or +in the Docker Engine logs: + +```text +You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limits +``` + +### Possible causes + +- You have reached your pull rate limit as an authenticated Docker Personal + user. +- You have reached your pull rate limit as an unauthenticated user based on your + IPv4 address or IPv6 /64 subnet. + +### Solution + +You can use one of the following solutions: + +- [Authenticate](./usage/pulls.md#authentication) or + [upgrade](../subscription/change.md#upgrade-your-subscription) your Docker + account. +- [View your pull rate limit](./usage/pulls.md#view-pull-rate-and-limit), + wait until your pull rate limit decreases, and then try again. + +## Too many requests (429 response code) + +### Error message + +When this issue occurs, you receive the following error message in the Docker CLI or +in the Docker Engine logs: + +```text +Too Many Requests +``` + +### Possible causes + +- You have reached the [Abuse rate limit](./usage/_index.md#abuse-rate-limit). + +### Solution + +1. Check for broken CI/CD pipelines accessing Docker Hub and fix them. +2. Implement a retry with back-off solution in your automated scripts to ensure + that you're not resending thousands of requests per minute. A minimal sketch + follows at the end of this page. + +## 500 response code + +### Error message + +When this issue occurs, the following error message is common in the Docker CLI +or in the Docker Engine logs: + +```text +Unexpected status code 500 +``` + +### Possible causes + +- There is a temporary Docker Hub service issue. + +### Solution + +1. View the [Docker System Status Page](https://www.dockerstatus.com/) and + verify that all services are operational. +2. Try accessing Docker Hub again. It might be a temporary issue. +3. [Contact Docker Support](https://www.docker.com/support/) to report the issue.
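As a companion to the retry-with-back-off advice under **Too many requests (429 response code)**, here is a minimal shell sketch. The image name, retry count, and delays are illustrative; adapt them to your pipeline:

```bash
#!/bin/sh
# Retry a pull up to five times, doubling the delay each time (2, 4, 8, 16, 32 seconds).
delay=2
for attempt in 1 2 3 4 5; do
  docker pull alpine:latest && break
  echo "Pull failed (attempt $attempt); retrying in ${delay}s..." >&2
  sleep "$delay"
  delay=$((delay * 2))
done
```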
\ No newline at end of file diff --git a/content/manuals/docker-hub/usage/_index.md b/content/manuals/docker-hub/usage/_index.md index f7311adf01c4..44f201ea5f6e 100644 --- a/content/manuals/docker-hub/usage/_index.md +++ b/content/manuals/docker-hub/usage/_index.md @@ -8,30 +8,19 @@ aliases: /docker-hub/download-rate-limit/ --- -{{< include "hub-limits.md" >}} - -When using Docker Hub, unauthenticated and Docker Personal users are subject to -strict limits. In contrast, Docker Pro, Team, and Business users benefit from a -consumption-based model with a base amount of included usage. This included -usage is not a hard limit; users can scale or upgrade their subscriptions to -receive additional usage or use on-demand usage. - The following table provides an overview of the included usage and limits for each user type, subject to fair use: -| User type | Pulls per month | Pull rate limit per hour | Public repositories | Public repository storage | Private repositories | Private repository storage | -|--------------------------|-----------------|--------------------------|---------------------|---------------------------|----------------------|----------------------------| -| Business (authenticated) | 1M | Unlimited | Unlimited | Unlimited | Unlimited | Up to 500 GB | -| Team (authenticated) | 100K | Unlimited | Unlimited | Unlimited | Unlimited | Up to 50 GB | -| Pro (authenticated) | 25K | Unlimited | Unlimited | Unlimited | Unlimited | Up to 5 GB | -| Personal (authenticated) | Not applicable | 40 | Unlimited | Unlimited | Up to 1 | Up to 2 GB | -| Unauthenticated users | Not applicable | 10 per IP address | Not applicable | Not applicable | Not applicable | Not applicable | - -For more details, see the following: +| User type | Pull rate limit per 6 hours | Number of public repositories | Number of private repositories | +|--------------------------|-----------------------------------------|-------------------------------|--------------------------------| +| Business (authenticated) | Unlimited | Unlimited | Unlimited | +| Team (authenticated) | Unlimited | Unlimited | Unlimited | +| Pro (authenticated) | Unlimited | Unlimited | Unlimited | +| Personal (authenticated) | 200 | Unlimited | Up to 1 | +| Unauthenticated users | 100 per IPv4 address or IPv6 /64 subnet | Not applicable | Not applicable | -- [Pull usage and limits](./pulls.md) -- [Storage usage and limits](./storage.md) +For more details, see [Pull usage and limits](./pulls.md). ## Fair use @@ -45,10 +34,10 @@ exhibiting excessive data and storage consumption. Docker Hub has an abuse rate limit to protect the application and infrastructure. This limit applies to all requests to Hub properties including -web pages, APIs, and image pulls. The limit is applied per-IP, and while the -limit changes over time depending on load and other factors, it's in the order -of thousands of requests per minute. The abuse limit applies to all users -equally regardless of account level. +web pages, APIs, and image pulls. The limit is applied per IPv4 address or per +IPv6 /64 subnet, and while the limit changes over time depending on load and +other factors, it's in the order of thousands of requests per minute. The abuse +limit applies to all users equally regardless of account level. You can differentiate between the pull rate limit and abuse rate limit by looking at the error code. 
The abuse limit returns a simple `429 Too Many diff --git a/content/manuals/docker-hub/usage/manage.md index 395a58a65f6c..14a01e7187ff 100644 --- a/content/manuals/docker-hub/usage/manage.md +++ b/content/manuals/docker-hub/usage/manage.md @@ -33,13 +33,10 @@ both individuals and organizations: 4. Optimize your storage by: - - Regularly auditing and removing repositories with untagged, unused, or outdated images. - - Looking for private repositories in Hub storage that exceed your plan's limits. + - Regularly auditing and [removing entire repositories](../repos/delete.md) with untagged, unused, or outdated images. + - Using [Image Management](../repos/manage/hub-images/manage.md) to remove stale and outdated images within a repository. -5. Increase your limits by upgrading or purchasing additional consumption. For - details, see [Scale your subscription](/manuals/subscription/scale.md). - -6. For organizations, monitor and enforce organizational policies by doing the +5. For organizations, monitor and enforce organizational policies by doing the following: - Routinely [view Docker Hub usage](https://hub.docker.com/usage) to monitor usage. diff --git a/content/manuals/docker-hub/usage/pulls.md index 52b1c92c7e20..4db9c0e90c1e 100644 --- a/content/manuals/docker-hub/usage/pulls.md +++ b/content/manuals/docker-hub/usage/pulls.md @@ -4,32 +4,25 @@ keywords: Docker Hub, pulls, usage, limit title: Docker Hub pull usage and limits linkTitle: Pulls weight: 10 +aliases: + - /docker-hub/usage/storage/ + - /docker-hub/usage/repositories/ --- -{{< include "hub-limits.md" >}} - -Unauthenticated and Docker Personal users are subject to hourly pull rate limits -on Docker Hub. In contrast, Docker Pro, Team, and Business users benefit from a -base number of included pulls per month without hourly rate restrictions. This -included usage is flexible, allowing you to scale or upgrade your subscription -to accommodate additional pulls or utilize on-demand pulls as needed. - -Any pulls exceeding the included amounts in each subscription tier will be -charged at an on-demand rate. To increase your monthly pull allowance and avoid -on-demand charges, you can [scale](/manuals/subscription/scale.md) or -[upgrade](/manuals/subscription/change.md) your subscription. +Unauthenticated and Docker Personal users are subject to a pull rate limit on +Docker Hub, measured over a 6-hour window. In contrast, Docker Pro, Team, and +Business users benefit from an unlimited pull rate.
The following pull usage and limits apply based on your subscription, subject to fair use: - -| User type | Pulls per month | Pull rate limit per hour | -|--------------------------|-----------------|--------------------------| -| Business (authenticated) | 1M | Unlimited | -| Team (authenticated) | 100K | Unlimited | -| Pro (authenticated) | 25K | Unlimited | -| Personal (authenticated) | Not applicable | 40 | -| Unauthenticated Users | Not applicable | 10 per IP address | +| User type | Pull rate limit per 6 hours | +|--------------------------|-----------------------------------------| +| Business (authenticated) | Unlimited | +| Team (authenticated) | Unlimited | +| Pro (authenticated) | Unlimited | +| Personal (authenticated) | 200 | +| Unauthenticated users | 100 per IPv4 address or IPv6 /64 subnet | ## Pull definition @@ -48,8 +41,7 @@ A pull is defined as the following: ## Pull attribution Pulls from authenticated users can be attributed to either a personal or an -organization -[namespace](/reference/glossary/#organization-name). +[organization namespace](/manuals/admin/faqs/general-faqs.md#whats-an-organization-name-or-namespace). Attribution is based on the following: @@ -71,9 +63,6 @@ Attribution is based on the following: organizations under the company, the pull is attributed to the user's personal namespace. -When pulling Docker Verified Publisher images, attribution towards rate limiting -is not applied. For more details, see [Docker Verified Publisher -Program](/manuals/docker-hub/repos/manage/trusted-content/dvp-program.md). ### Authentication @@ -121,6 +110,13 @@ for information on authentication. If you're using any third-party platforms, follow your provider’s instructions on using registry authentication. +> [!NOTE] +> +> When pulling images via a third-party platform, the platform might use the same +> IPv4 address or IPv6 /64 subnet to pull images for multiple users. Even if you +> are authenticated, pulls attributed to a single IPv4 address or IPv6 /64 subnet +> might cause [abuse rate limiting](./_index.md#abuse-rate-limit). + - [Artifactory](https://www.jfrog.com/confluence/display/JFROG/Advanced+Settings#AdvancedSettings-RemoteCredentials) - [AWS CodeBuild](https://aws.amazon.com/blogs/devops/how-to-use-docker-images-from-a-private-registry-in-aws-codebuild-for-your-build-environment/) - [AWS ECS/Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html) @@ -153,10 +149,9 @@ separated file with the following detailed information. | `version_checks` | The number of version checks accumulated for the date and hour of each image repository. Depending on the client, a pull can do a version check to verify the existence of an image or tag without downloading it. | This helps identify the frequency of version checks, which you can use to analyze usage trends and potential unexpected behaviors. | | `pulls` | The number of pulls accumulated for the date and hour of each image repository. | This helps identify the frequency of repository pulls, which you can use to analyze usage trends and potential unexpected behaviors. | +## View pull rate and limit -## View hourly pull rate and limit - -The pull rate limit is calculated on a per hour basis. There is no pull rate +The pull rate limit is calculated on a 6-hour basis. There is no pull rate limit for users or automated systems with a paid subscription. Unauthenticated and Docker Personal users using Docker Hub will experience rate limits on image pulls.
@@ -184,8 +179,8 @@ To view your current pull rate and limit: $ TOKEN=$(curl "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) ``` - - To get a token with a user account, if you are authenticated (insert your - username and password in the following command): + - To get a token with a user account, if you are authenticated, insert your + username and password in the following command: ```console $ TOKEN=$(curl --user 'username:password' "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token) @@ -204,15 +199,16 @@ To view your current pull rate and limit: ```text ratelimit-limit: 100;w=21600 - ratelimit-remaining: 76;w=21600 + ratelimit-remaining: 20;w=21600 docker-ratelimit-source: 192.0.2.1 ``` In the previous example, the pull limit is 100 pulls per 21600 seconds (6 - hours), and there are 76 pulls remaining. + hours), and there are 20 pulls remaining. If you don't see any `ratelimit` header, it could be because the image or your IP is unlimited in partnership with a publisher, provider, or an open source organization. It could also mean that the user you are pulling as is part of a - paid Docker plan. Pulling that image won't count toward pull rate limits if you - don't see these headers. \ No newline at end of file + paid Docker subscription. Pulling that image won't count toward pull rate limits if you + don't see these headers. + diff --git a/content/manuals/docker-hub/usage/storage.md b/content/manuals/docker-hub/usage/storage.md deleted file mode 100644 index 0c8c7a4c65aa..000000000000 --- a/content/manuals/docker-hub/usage/storage.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: Learn about storage usage limits for Docker Hub. -keywords: Docker Hub, usage, storage, repository -title: Docker Hub storage usage and limits -linkTitle: Storage -weight: 20 ---- - -{{< include "hub-limits.md" >}} - -The following storage and repository limits apply based on your subscription, subject to fair use: - -| Plan | Public repositories | Public repository storage | Private repositories | Private repository storage | -|----------|---------------------|---------------------------|----------------------------|----------------------------| -| Personal | Unlimited | Unlimited | Up to 1 private repository | Up to 2 GB | -| Pro | Unlimited | Unlimited | Unlimited | Up to 5 GB | -| Team | Unlimited | Unlimited | Unlimited | Up to 50 GB | -| Business | Unlimited | Unlimited | Unlimited | Up to 500 GB | - -Any storage usage beyond the included amounts in each paid subscription tier -will be charged at an on-demand rate. For more details about storage -calcultations and billing, see [Docker Hub storage -pricing](/manuals/billing/docker-hub-pricing.md). - -## View storage usage and repositories - -You can view your storage usage on the [Usage page](https://hub.docker.com/usage/storage) in Docker Hub. diff --git a/content/manuals/engine/cli/filter.md b/content/manuals/engine/cli/filter.md index e51fb633470f..9549f8a34b5d 100644 --- a/content/manuals/engine/cli/filter.md +++ b/content/manuals/engine/cli/filter.md @@ -30,15 +30,15 @@ output of the `docker images` command to only print `alpine` images. 
```console $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ubuntu 20.04 33a5cc25d22c 36 minutes ago 101MB -ubuntu 18.04 152dc042452c 36 minutes ago 88.1MB -alpine 3.16 a8cbb8c69ee7 40 minutes ago 8.67MB +ubuntu 24.04 33a5cc25d22c 36 minutes ago 101MB +ubuntu 22.04 152dc042452c 36 minutes ago 88.1MB +alpine 3.21 a8cbb8c69ee7 40 minutes ago 8.67MB alpine latest 7144f7bab3d4 40 minutes ago 11.7MB busybox uclibc 3e516f71d880 48 minutes ago 2.4MB busybox glibc 7338d0c72c65 48 minutes ago 6.09MB $ docker images --filter reference=alpine REPOSITORY TAG IMAGE ID CREATED SIZE -alpine 3.16 a8cbb8c69ee7 40 minutes ago 8.67MB +alpine 3.21 a8cbb8c69ee7 40 minutes ago 8.67MB alpine latest 7144f7bab3d4 40 minutes ago 11.7MB ``` @@ -58,9 +58,9 @@ following example shows how to print all images that match `alpine:latest` or ```console $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ubuntu 20.04 33a5cc25d22c 2 hours ago 101MB -ubuntu 18.04 152dc042452c 2 hours ago 88.1MB -alpine 3.16 a8cbb8c69ee7 2 hours ago 8.67MB +ubuntu 24.04 33a5cc25d22c 2 hours ago 101MB +ubuntu 22.04 152dc042452c 2 hours ago 88.1MB +alpine 3.21 a8cbb8c69ee7 2 hours ago 8.67MB alpine latest 7144f7bab3d4 2 hours ago 11.7MB busybox uclibc 3e516f71d880 2 hours ago 2.4MB busybox glibc 7338d0c72c65 2 hours ago 6.09MB diff --git a/content/manuals/engine/cli/formatting.md b/content/manuals/engine/cli/formatting.md index e7b0c279343f..ad20c8c1cd9b 100644 --- a/content/manuals/engine/cli/formatting.md +++ b/content/manuals/engine/cli/formatting.md @@ -90,6 +90,26 @@ $ docker inspect --format "{{title .Name}}" container $ docker inspect --format "{{upper .Name}}" container ``` +## pad + +`pad` adds whitespace padding to a string. You can specify the number of spaces to add before and after the string. + +```console +$ docker image list --format '{{pad .Repository 5 10}}' +``` + +This example adds 5 spaces before the image repository name and 10 spaces after. + +## truncate + +`truncate` shortens a string to a specified length. If the string is shorter than the specified length, it remains unchanged. + +```console +$ docker image list --format '{{truncate .Repository 15}}' +``` + +This example displays the image repository name, truncating it to the first 15 characters if it's longer. + ## println `println` prints each value on a new line. diff --git a/content/manuals/engine/cli/otel.md b/content/manuals/engine/cli/otel.md index b9b9f979ee81..8ec3bbf88b70 100644 --- a/content/manuals/engine/cli/otel.md +++ b/content/manuals/engine/cli/otel.md @@ -6,7 +6,7 @@ aliases: - /config/otel/ --- -{{< introduced engine 26.1.0 >}} +{{< summary-bar feature_name="Docker CLI OpenTelemetry" >}} The Docker CLI supports [OpenTelemetry](https://opentelemetry.io/docs/) instrumentation for emitting metrics about command invocations. This is disabled by default. @@ -63,7 +63,7 @@ The following Docker Compose file bootstraps a set of services to get started wi It includes an OpenTelemetry collector that the CLI can send metrics to, and a Prometheus backend that scrapes the metrics off the collector. 
-```yaml {collapse=true,title=compose.yml}
+```yaml {collapse=true,title=compose.yaml}
 name: cli-otel
 services:
   prometheus:
@@ -95,7 +95,7 @@ volumes:
 ```
 
 This service assumes that the following two configuration files exist alongside
-`compose.yml`:
+`compose.yaml`:
 
 - ```yaml {collapse=true,title=otelcol.yml}
   # Receive signals over gRPC and HTTP
diff --git a/content/manuals/engine/containers/resource_constraints.md b/content/manuals/engine/containers/resource_constraints.md
index 09e04a4a37e4..5f9efc616eba 100644
--- a/content/manuals/engine/containers/resource_constraints.md
+++ b/content/manuals/engine/containers/resource_constraints.md
@@ -69,8 +69,8 @@ You can mitigate the risk of system instability due to OOME by:
 
 Docker can enforce hard or soft memory limits.
 
-- Hard limits lets the container use no more than a fixed amount of memory.
-- Soft limits lets the container use as much memory as it needs unless certain
+- Hard limits let the container use no more than a fixed amount of memory.
+- Soft limits let the container use as much memory as it needs unless certain
   conditions are met, such as when the kernel detects low memory or contention
   on the host machine.
 
@@ -162,7 +162,7 @@ a container. Consider the following scenarios:
   an OOM error. If the kernel memory limit is higher than the user memory
   limit, the kernel limit doesn't cause the container to experience an OOM.
 
-When you enable kernel memory limits, the host machine tracks "high water mark"
+When you enable kernel memory limits, the host machine tracks the "high water mark"
 statistics on a per-process basis, so you can track which processes (in this
 case, containers) are using excess memory. This can be seen per process by
 viewing `/proc/<PID>/status` on the host machine.
@@ -186,7 +186,7 @@ the container's cgroup on the host machine.
 | :--------------------- | :------------------------------------------------------------------------------------------------ |
 | `--cpus=<value>`       | Specify how much of the available CPU resources a container can use. For instance, if the host machine has two CPUs and you set `--cpus="1.5"`, the container is guaranteed at most one and a half of the CPUs. This is the equivalent of setting `--cpu-period="100000"` and `--cpu-quota="150000"`. |
 | `--cpu-period=<value>` | Specify the CPU CFS scheduler period, which is used alongside `--cpu-quota`. Defaults to 100000 microseconds (100 milliseconds). Most users don't change this from the default. For most use-cases, `--cpus` is a more convenient alternative. |
-| `--cpu-quota=<value>`  | Impose a CPU CFS quota on the container. The number of microseconds per `--cpu-period` that the container is limited to before throttled. As such acting as the effective ceiling. For most use-cases, `--cpus` is a more convenient alternative. |
+| `--cpu-quota=<value>`  | Impose a CPU CFS quota on the container. The number of microseconds per `--cpu-period` that the container is limited to before being throttled. As such, it acts as the effective ceiling. For most use-cases, `--cpus` is a more convenient alternative.
| | `--cpuset-cpus` | Limit the specific CPUs or cores a container can use. A comma-separated list or hyphen-separated range of CPUs a container can use, if you have more than one CPU. The first CPU is numbered 0. A valid value might be `0-3` (to use the first, second, third, and fourth CPU) or `1,3` (to use the second and fourth CPU). | | `--cpu-shares` | Set this flag to a value greater or less than the default of 1024 to increase or reduce the container's weight, and give it access to a greater or lesser proportion of the host machine's CPU cycles. This is only enforced when CPU cycles are constrained. When plenty of CPU cycles are available, all containers use as much CPU as they need. In that way, this is a soft limit. `--cpu-shares` doesn't prevent containers from being scheduled in Swarm mode. It prioritizes container CPU resources for the available CPU cycles. It doesn't guarantee or reserve any specific CPU access. | @@ -234,7 +234,7 @@ for real-time tasks per runtime period. For instance, with the default period of containers using the real-time scheduler can run for 950000 microseconds for every 1000000-microsecond period, leaving at least 50000 microseconds available for non-real-time tasks. To make this configuration permanent on systems which use -`systemd`, create a systemd unit file for the `docker` service. For an example, +`systemd`, create a systemd unit file for the `docker` service. For example, see the instruction on how to configure the daemon to use a proxy with a [systemd unit file](../daemon/proxy.md#systemd-unit-file). @@ -343,6 +343,6 @@ environment variables. More information on valid variables can be found in the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html) documentation. These variables can be set in a Dockerfile. -You can also use CUDA images which sets these variables automatically. See the +You can also use CUDA images, which set these variables automatically. See the official [CUDA images](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) NGC catalog page. diff --git a/content/manuals/engine/containers/start-containers-automatically.md b/content/manuals/engine/containers/start-containers-automatically.md index 213ae635c54a..f5a084362931 100644 --- a/content/manuals/engine/containers/start-containers-automatically.md +++ b/content/manuals/engine/containers/start-containers-automatically.md @@ -10,7 +10,7 @@ aliases: - /config/containers/start-containers-automatically/ --- -Docker provides [restart policies](/manuals/engine/containers/run.md#restart-policies---restart) +Docker provides [restart policies](/reference/cli/docker/container/run.md#restart) to control whether your containers start automatically when they exit, or when Docker restarts. Restart policies start linked containers in the correct order. Docker recommends that you use restart policies, and avoid using process @@ -22,7 +22,7 @@ a Docker upgrade, though networking and user input are interrupted. ## Use a restart policy -To configure the restart policy for a container, use the `--restart` flag +To configure the restart policy for a container, use the [`--restart`](/reference/cli/docker/container/run.md#restart) flag when using the `docker run` command. 
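+
+For example, the following command runs a Redis container that restarts
+automatically unless it is explicitly stopped:
+
+```console
+$ docker run -d --restart unless-stopped redis
+```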
The value of the `--restart` flag can be any of the following: diff --git a/content/manuals/engine/daemon/live-restore.md b/content/manuals/engine/daemon/live-restore.md index 264afacf5c4e..ab27f50ec509 100644 --- a/content/manuals/engine/daemon/live-restore.md +++ b/content/manuals/engine/daemon/live-restore.md @@ -4,8 +4,9 @@ keywords: docker, upgrade, daemon, dockerd, live-restore, daemonless container title: Live restore weight: 40 aliases: - - /engine/admin/live-restore/ - /config/containers/live-restore/ + - /engine/admin/live-restore/ + - /engine/containers/live-restore/ --- By default, when the Docker daemon terminates, it shuts down running containers. diff --git a/content/manuals/engine/daemon/logs.md b/content/manuals/engine/daemon/logs.md index 0b09f3e8e3b0..9a564b9fd3be 100644 --- a/content/manuals/engine/daemon/logs.md +++ b/content/manuals/engine/daemon/logs.md @@ -117,7 +117,7 @@ The Docker daemon log can be viewed by using one of the following methods: Look in the Docker logs for a message like the following: -```none +```text ...goroutine stacks written to /var/run/docker/goroutine-stacks-2017-06-02T193336z.log ``` diff --git a/content/manuals/engine/daemon/troubleshoot.md b/content/manuals/engine/daemon/troubleshoot.md index 7b68c88fb04a..770b2db6179d 100644 --- a/content/manuals/engine/daemon/troubleshoot.md +++ b/content/manuals/engine/daemon/troubleshoot.md @@ -545,7 +545,7 @@ all other running containers as filesystems within the container which mounts `/var/lib/docker/`. When you attempt to remove any of these containers, the removal attempt may fail with an error like the following: -```none +```text Error: Unable to remove filesystem for 74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515: remove /var/lib/docker/containers/74bef250361c7817bee19349c93139621b272bc8f654ae112dd4eb9652af9515/shm: diff --git a/content/manuals/engine/install/binaries.md b/content/manuals/engine/install/binaries.md index 82930444038d..6ac416455ca3 100644 --- a/content/manuals/engine/install/binaries.md +++ b/content/manuals/engine/install/binaries.md @@ -138,7 +138,7 @@ instructions for enabling and configuring AppArmor or SELinux. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} ## Install client binaries on macOS diff --git a/content/manuals/engine/install/centos.md b/content/manuals/engine/install/centos.md index a79b2cd0535b..d01ae2aaef41 100644 --- a/content/manuals/engine/install/centos.md +++ b/content/manuals/engine/install/centos.md @@ -165,7 +165,7 @@ $ sudo dnf config-manager --add-repo {{% param "download-url-base" %}}/docker-ce You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -214,7 +214,7 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -222,7 +222,7 @@ To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), using `dnf upgrade` instead of `dnf install`, and point to the new files. 
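+
+For example, replace the placeholders with the paths to the downloaded
+packages:
+
+```console
+$ sudo dnf upgrade ./docker-ce-<version>.rpm ./docker-ce-cli-<version>.rpm
+```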
-{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/debian.md b/content/manuals/engine/install/debian.md index 1a0e17c9f10f..0ca59c2490a8 100644 --- a/content/manuals/engine/install/debian.md +++ b/content/manuals/engine/install/debian.md @@ -42,6 +42,7 @@ To get started with Docker Engine on Debian, make sure you To install Docker Engine, you need the 64-bit version of one of these Debian versions: +- Debian Trixie 13 (testing) - Debian Bookworm 12 (stable) - Debian Bullseye 11 (oldstable) @@ -144,7 +145,7 @@ Docker from the repository. ```console $ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin ``` - + {{< /tab >}} {{< tab name="Specific version" >}} @@ -181,7 +182,7 @@ Docker from the repository. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -237,14 +238,14 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), pointing to the new files. -{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/fedora.md b/content/manuals/engine/install/fedora.md index 9ee612c60cf8..a81766f05235 100644 --- a/content/manuals/engine/install/fedora.md +++ b/content/manuals/engine/install/fedora.md @@ -26,7 +26,7 @@ To get started with Docker Engine on Fedora, make sure you To install Docker Engine, you need a maintained version of one of the following Fedora versions: -- Fedora 40 +- Fedora 42 - Fedora 41 ### Uninstall old versions @@ -162,7 +162,7 @@ $ sudo dnf-3 config-manager --add-repo {{% param "download-url-base" %}}/docker- You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -211,7 +211,7 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -219,7 +219,7 @@ To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), using `dnf upgrade` instead of `dnf install`, and point to the new files. 
-{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/linux-postinstall.md b/content/manuals/engine/install/linux-postinstall.md index 443185d632fc..f82fe4b61179 100644 --- a/content/manuals/engine/install/linux-postinstall.md +++ b/content/manuals/engine/install/linux-postinstall.md @@ -78,7 +78,7 @@ To create the `docker` group and add your user: If you initially ran Docker CLI commands using `sudo` before adding your user to the `docker` group, you may see the following error: - ```none + ```text WARNING: Error loading config file: /home/user/.docker/config.json - stat /home/user/.docker/config.json: permission denied ``` diff --git a/content/manuals/engine/install/raspberry-pi-os.md b/content/manuals/engine/install/raspberry-pi-os.md index ca4d69caedd7..1399c73ce073 100644 --- a/content/manuals/engine/install/raspberry-pi-os.md +++ b/content/manuals/engine/install/raspberry-pi-os.md @@ -169,7 +169,7 @@ Docker from the repository. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -225,14 +225,14 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), pointing to the new files. -{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/rhel.md b/content/manuals/engine/install/rhel.md index e0678357fc2f..f76d01be5ce9 100644 --- a/content/manuals/engine/install/rhel.md +++ b/content/manuals/engine/install/rhel.md @@ -165,7 +165,7 @@ $ sudo dnf config-manager --add-repo {{% param "download-url-base" %}}/docker-ce You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -230,7 +230,7 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -238,7 +238,7 @@ To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), using `dnf upgrade` instead of `dnf install`, and point to the new files. -{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/sles.md b/content/manuals/engine/install/sles.md index b148e60a1b10..d32163c93faa 100644 --- a/content/manuals/engine/install/sles.md +++ b/content/manuals/engine/install/sles.md @@ -185,7 +185,7 @@ $ sudo zypper addrepo {{% param "download-url-base" %}}/docker-ce.repo You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -234,7 +234,7 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. 
-{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -242,7 +242,7 @@ To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), using `zypper -y upgrade` instead of `zypper -y install`, and point to the new files. -{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/install/ubuntu.md b/content/manuals/engine/install/ubuntu.md index ae89082b26d5..94027a89a494 100644 --- a/content/manuals/engine/install/ubuntu.md +++ b/content/manuals/engine/install/ubuntu.md @@ -19,6 +19,7 @@ aliases: - /install/linux/docker-ee/ubuntu/ - /install/linux/ubuntu/ - /installation/ubuntulinux/ +- /linux/step_one/ download-url-base: https://download.docker.com/linux/ubuntu --- @@ -53,11 +54,15 @@ versions: - Ubuntu Oracular 24.10 - Ubuntu Noble 24.04 (LTS) - Ubuntu Jammy 22.04 (LTS) -- Ubuntu Focal 20.04 (LTS) Docker Engine for Ubuntu is compatible with x86_64 (or amd64), armhf, arm64, s390x, and ppc64le (ppc64el) architectures. +> [!NOTE] +> +> Installation on Ubuntu derivative distributions, such as Linux Mint, is not officially +> supported (though it may work). + ### Uninstall old versions Before you can install Docker Engine, you need to uninstall any conflicting packages. @@ -127,16 +132,11 @@ Docker from the repository. # Add the repository to Apt sources: echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] {{% param "download-url-base" %}} \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get update ``` - > [!NOTE] - > - > If you use an Ubuntu derivative distribution, such as Linux Mint, - > you may need to use `UBUNTU_CODENAME` instead of `VERSION_CODENAME`. - 2. Install the Docker packages. {{< tabs >}} @@ -184,7 +184,7 @@ Docker from the repository. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine @@ -240,14 +240,14 @@ download a new file each time you want to upgrade Docker Engine. You have now successfully installed and started Docker Engine. -{{< include "root-errors.md" >}} +{{% include "root-errors.md" %}} #### Upgrade Docker Engine To upgrade Docker Engine, download the newer package files and repeat the [installation procedure](#install-from-a-package), pointing to the new files. -{{< include "install-script.md" >}} +{{% include "install-script.md" %}} ## Uninstall Docker Engine diff --git a/content/manuals/engine/logging/configure.md b/content/manuals/engine/logging/configure.md index fa7801fb794a..596d27adb611 100644 --- a/content/manuals/engine/logging/configure.md +++ b/content/manuals/engine/logging/configure.md @@ -10,6 +10,7 @@ aliases: - /engine/admin/logging/logentries/ - /engine/admin/logging/overview/ - /config/containers/logging/configure/ + - /config/containers/ --- Docker includes multiple logging mechanisms to help you get information from @@ -30,7 +31,7 @@ included with Docker, you can also implement and use [logging driver plugins](pl > output, which can lead to disk space exhaustion. 
 >
 > Docker keeps the json-file logging driver (without log-rotation) as a default
-> to remain backward compatibility with older versions of Docker, and for situations
+> to remain backwards compatible with older versions of Docker, and for situations
 > where Docker is used as runtime for Kubernetes.
 >
 > For other situations, the `local` logging driver is recommended as it performs
diff --git a/content/manuals/engine/logging/drivers/gelf.md b/content/manuals/engine/logging/drivers/gelf.md
index 7676fe4f1875..5416bdc9463d 100644
--- a/content/manuals/engine/logging/drivers/gelf.md
+++ b/content/manuals/engine/logging/drivers/gelf.md
@@ -66,7 +66,7 @@ The `gelf` logging driver supports the following options:
 | `gelf-address`             | required | The address of the GELF server. `tcp` and `udp` are the only supported URI specifier and you must specify the port. | `--log-opt gelf-address=udp://192.168.0.42:12201` |
 | `gelf-compression-type`    | optional | `UDP Only` The type of compression the GELF driver uses to compress each log message. Allowed values are `gzip`, `zlib` and `none`. The default is `gzip`. Note that enabled compression leads to excessive CPU usage, so it's highly recommended to set this to `none`. | `--log-opt gelf-compression-type=gzip` |
 | `gelf-compression-level`   | optional | `UDP Only` The level of compression when `gzip` or `zlib` is the `gelf-compression-type`. An integer in the range of `-1` to `9` (BestCompression). Default value is 1 (BestSpeed). Higher levels provide more compression at lower speed. Either `-1` or `0` disables compression. | `--log-opt gelf-compression-level=2` |
-| `gelf-tcp-max-reconnect`   | optional | `TCP Only` The maximum number of reconnection attempts when the connection drop. An positive integer. Default value is 3. | `--log-opt gelf-tcp-max-reconnect=3` |
+| `gelf-tcp-max-reconnect`   | optional | `TCP Only` The maximum number of reconnection attempts when the connection drops. A positive integer. Default value is 3. | `--log-opt gelf-tcp-max-reconnect=3` |
 | `gelf-tcp-reconnect-delay` | optional | `TCP Only` The number of seconds to wait between reconnection attempts. A positive integer. Default value is 1. | `--log-opt gelf-tcp-reconnect-delay=1` |
 | `tag`                      | optional | A string that's appended to the `APP-NAME` in the `gelf` message. By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. | `--log-opt tag=mailer` |
 | `labels`                   | optional | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon accepts. Adds additional key on the `extra` fields, prefixed by an underscore (`_`). Used for advanced [log tag options](log_tags.md).
| `--log-opt labels=production_status,geo` |
diff --git a/content/manuals/engine/logging/drivers/syslog.md b/content/manuals/engine/logging/drivers/syslog.md
index 2cabe82bcda7..bfdce11d1e67 100644
--- a/content/manuals/engine/logging/drivers/syslog.md
+++ b/content/manuals/engine/logging/drivers/syslog.md
@@ -22,7 +22,7 @@ receiver can extract the following information:
 
 The format is defined in [RFC 5424](https://tools.ietf.org/html/rfc5424) and Docker's syslog driver implements the [ABNF reference](https://tools.ietf.org/html/rfc5424#section-6) in the following way:
 
-```none
+```text
 TIMESTAMP SP HOSTNAME SP APP-NAME SP PROCID SP MSGID
     +          +          +          |         +
     |          |          |          |         |
diff --git a/content/manuals/engine/logging/log_tags.md b/content/manuals/engine/logging/log_tags.md
index d0372fe5c25c..d9493abc2dbd 100644
--- a/content/manuals/engine/logging/log_tags.md
+++ b/content/manuals/engine/logging/log_tags.md
@@ -30,7 +30,7 @@ Docker supports some special template markup you can use when specifying a tag's
 
 For example, specifying a `--log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"` value yields `syslog` log lines like:
 
-```none
+```text
 Aug  7 18:33:19 HOSTNAME hello-world/foobar/5790672ab6a0[9103]: Hello from Docker.
 ```
 
diff --git a/content/manuals/engine/network/_index.md b/content/manuals/engine/network/_index.md
index 0ffc841b59f7..97583dfe8af1 100644
--- a/content/manuals/engine/network/_index.md
+++ b/content/manuals/engine/network/_index.md
@@ -7,6 +7,7 @@ keywords: networking, container, standalone, IP address, DNS resolution
 aliases:
 - /articles/networking/
 - /config/containers/container-networking/
+- /engine/tutorials/networkingcontainers/
 - /engine/userguide/networking/
 - /engine/userguide/networking/configure-dns/
 - /engine/userguide/networking/default_network/binding/
@@ -63,6 +64,41 @@ networking functionality:
 For more information about the different drivers, see
 [Network drivers overview](./drivers/_index.md).
 
+### Connecting to multiple networks
+
+A container can be connected to multiple networks.
+
+For example, a frontend container might be connected to a bridge network
+with external access, and an
+[`--internal`](/reference/cli/docker/network/create/#internal) network
+to communicate with containers running backend services that do not need
+external network access.
+
+A container might also be connected to different types of networks. For
+example, an `ipvlan` network to provide internet access, and a `bridge`
+network for access to local services.
+
+When sending packets, if the destination is an address in a directly connected
+network, packets are sent to that network. Otherwise, packets are sent to
+a default gateway for routing to their destination. In the example above,
+the `ipvlan` network's gateway must be the default gateway.
+
+The default gateway is selected by Docker, and might change whenever a
+container's network connections change.
+To make Docker choose a specific default gateway when creating the container
+or connecting a new network, set a gateway priority. See option `gw-priority`
+for the [`docker run`](/reference/cli/docker/container/run.md) and
+[`docker network connect`](/reference/cli/docker/network/connect.md) commands.
+
+The default `gw-priority` is `0` and the gateway in the network with the
+highest priority is the default gateway. So, when a network should always
+be the default gateway, it is enough to set its `gw-priority` to `1`.
+
+```console
+$ docker run --network name=gwnet,gw-priority=1 --network anet1 --name myctr myimage
+$ docker network connect anet2 myctr
+```
+
 ## Container networks
 
 In addition to user-defined networks, you can attach a container to another
@@ -124,8 +160,8 @@ Here are some examples:
 >
 > > [!WARNING]
 > >
-> > Hosts within the same L2 segment (for example, hosts connected to the same
-> > network switch) can reach ports published to localhost.
+> > In releases older than 28.0.0, hosts within the same L2 segment (for example,
+> > hosts connected to the same network switch) can reach ports published to localhost.
 > > For more information, see
 > > [moby/moby#45610](https://github.com/moby/moby/issues/45610)
 
@@ -144,6 +180,14 @@ direct routing to containers, see
 
 ## IP address and hostname
 
+When creating a network, IPv4 address allocation is enabled by default. You
+can disable it using `--ipv4=false`. IPv6 address allocation can be enabled
+using `--ipv6`.
+
+```console
+$ docker network create --ipv6 --ipv4=false v6net
+```
+
 By default, the container gets an IP address for every Docker network it attaches
 to. A container receives an IP address out of the IP subnet of the network.
 The Docker daemon performs dynamic subnetting and IP address allocation for containers.
diff --git a/content/manuals/engine/network/drivers/bridge.md b/content/manuals/engine/network/drivers/bridge.md
index 03d3c402209b..4f0b3268d368 100644
--- a/content/manuals/engine/network/drivers/bridge.md
+++ b/content/manuals/engine/network/drivers/bridge.md
@@ -103,18 +103,18 @@ flag.
 ## Options
 
 The following table describes the driver-specific options that you can pass to
-`--option` when creating a custom network using the `bridge` driver.
-
-| Option                                           | Default                     | Description                                                                                     |
-|--------------------------------------------------|-----------------------------|-------------------------------------------------------------------------------------------------|
-| `com.docker.network.bridge.name`                 |                             | Interface name to use when creating the Linux bridge.                                          |
-| `com.docker.network.bridge.enable_ip_masquerade` | `true`                      | Enable IP masquerading.                                                                        |
-| `com.docker.network.bridge.gateway_mode_ipv4`<br>`com.docker.network.bridge.gateway_mode_ipv6` | `nat` | Enable NAT and masquerading (`nat`), or only allow direct routing to the container (`routed`). |
-| `com.docker.network.bridge.enable_icc`           | `true`                      | Enable or Disable inter-container connectivity.                                                |
-| `com.docker.network.bridge.host_binding_ipv4`    | all IPv4 and IPv6 addresses | Default IP when binding container ports.                                                       |
-| `com.docker.network.driver.mtu`                  | `0` (no limit)              | Set the containers network Maximum Transmission Unit (MTU).                                    |
-| `com.docker.network.container_iface_prefix`      | `eth`                       | Set a custom prefix for container interfaces.                                                  |
-| `com.docker.network.bridge.inhibit_ipv4`         | `false`                     | Prevent Docker from [assigning an IP address](#skip-ip-address-configuration) to the network.  |
+`--opt` when creating a custom network using the `bridge` driver.
+
+| Option                                           | Default                     | Description                                                                                          |
+|--------------------------------------------------|-----------------------------|------------------------------------------------------------------------------------------------------|
+| `com.docker.network.bridge.name`                 |                             | Interface name to use when creating the Linux bridge.                                               |
+| `com.docker.network.bridge.enable_ip_masquerade` | `true`                      | Enable IP masquerading.                                                                              |
+| `com.docker.network.bridge.gateway_mode_ipv4`<br>`com.docker.network.bridge.gateway_mode_ipv6` | `nat` | Control external connectivity. See [Packet filtering and firewalls](packet-filtering-firewalls.md). |
+| `com.docker.network.bridge.enable_icc`           | `true`                      | Enable or disable inter-container connectivity.                                                      |
+| `com.docker.network.bridge.host_binding_ipv4`    | all IPv4 and IPv6 addresses | Default IP when binding container ports.                                                             |
+| `com.docker.network.driver.mtu`                  | `0` (no limit)              | Set the container's network Maximum Transmission Unit (MTU).                                         |
+| `com.docker.network.container_iface_prefix`      | `eth`                       | Set a custom prefix for container interfaces.                                                        |
+| `com.docker.network.bridge.inhibit_ipv4`         | `false`                     | Prevent Docker from [assigning an IP address](#skip-bridge-ip-address-configuration) to the bridge.  |
 
 Some of these options are also available as flags to the `dockerd` CLI, and you
 can use them to configure the default `docker0` bridge when starting the Docker
@@ -229,6 +229,20 @@ When you create your network, you can specify the `--ipv6` flag to enable IPv6.
 $ docker network create --ipv6 --subnet 2001:db8:1234::/64 my-net
 ```
 
+If you do not provide a `--subnet` option, a Unique Local Address (ULA) prefix
+will be chosen automatically.
+
+## IPv6-only bridge networks
+
+To skip IPv4 address configuration on the bridge and in its containers, create
+the network with option `--ipv4=false`, and enable IPv6 using `--ipv6`.
+
+```console
+$ docker network create --ipv6 --ipv4=false v6net
+```
+
+IPv4 address configuration cannot be disabled in the default bridge network.
+
 ## Use the default bridge network
 
 The default `bridge` network is considered a legacy detail of Docker and is not
@@ -259,7 +273,12 @@ the settings you need to customize.
 }
 ```
 
-Restart Docker for the changes to take effect.
+In this example:
+
+- The bridge's address is "192.168.1.1/24" (from `bip`).
+- The bridge network's subnet is "192.168.1.0/24" (from `bip`).
+- Container addresses will be allocated from "192.168.1.0/25" (from `fixed-cidr`).
 
 ### Use IPv6 with the default bridge network
 
@@ -270,22 +289,34 @@ These three options only affect the default bridge, they are not used by
 user-defined networks. The addresses in below are examples from the IPv6
 documentation range.
 
-- Option `ipv6` is required
-- Option `fixed-cidr-v6` is required, it specifies the network prefix to be used.
+- Option `ipv6` is required.
+- Option `bip6` is optional. It specifies the address of the default bridge, which
+  will be used as the default gateway by containers. It also specifies the subnet
+  for the bridge network.
+- Option `fixed-cidr-v6` is optional. It specifies the address range Docker may
+  automatically allocate to containers.
   - The prefix should normally be `/64` or shorter.
   - For experimentation on a local network, it is better to use a Unique Local
-    prefix (matching `fd00::/8`) than a Link Local prefix (matching `fe80::/10`).
+    Address (ULA) prefix (matching `fd00::/8`) than a Link Local prefix (matching
+    `fe80::/10`).
- Option `default-gateway-v6` is optional. If unspecified, the default is the
   first address in the `fixed-cidr-v6` subnet.
 
 ```json
 {
   "ipv6": true,
+  "bip6": "2001:db8::1111/64",
   "fixed-cidr-v6": "2001:db8::/64",
   "default-gateway-v6": "2001:db8:abcd::89"
 }
 ```
 
+If no `bip6` is specified, `fixed-cidr-v6` defines the subnet for the bridge
+network. If no `bip6` or `fixed-cidr-v6` is specified, a ULA prefix will be
+chosen.
+
+Restart Docker for changes to take effect.
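+
+After the restart, you can check that a container on the default bridge
+receives an IPv6 address. For example:
+
+```console
+$ docker run --rm busybox ip -6 addr show dev eth0
+```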
+
 ## Connection limit for bridge networks
 
 Due to limitations set by the Linux kernel, bridge networks become unstable and
@@ -295,20 +326,22 @@ to a single network. For more information about this limitation, see
 [moby/moby#44973](https://github.com/moby/moby/issues/44973#issuecomment-1543747718).
 
-## Skip IP address configuration
+## Skip bridge IP address configuration
+
+The bridge is normally assigned the network's `--gateway` address, which is
+used as the default route from the bridge network to other networks.
 
 The `com.docker.network.bridge.inhibit_ipv4` option lets you create a network
-that uses an existing bridge and have Docker skip configuring the IPv4 address
-on the bridge. This is useful if you want to configure the IP address for the
-bridge manually. For instance if you add a physical interface to your bridge,
-and need to move its IP address to the bridge interface.
+without the IPv4 gateway address being assigned to the bridge. This is useful
+if you want to configure the gateway IP address for the bridge manually. For
+instance, if you add a physical interface to your bridge and need it to have
+the gateway address.
 
-To use this option, you should first configure the Docker daemon to use a
-self-managed bridge, using the `bridge` option in the `daemon.json` or the
-`dockerd --bridge` flag.
+With this configuration, north-south traffic (to and from the bridge network)
+won't work unless you've manually configured the gateway address on the bridge,
+or a device attached to it.
 
-With this configuration, north-south traffic won't work unless you've manually
-configured the IP address for the bridge.
+This option can only be used with user-defined bridge networks.
 
 ## Next steps
 
diff --git a/content/manuals/engine/network/drivers/ipvlan.md b/content/manuals/engine/network/drivers/ipvlan.md
index f33e779d9fc2..c5adad016eb9 100644
--- a/content/manuals/engine/network/drivers/ipvlan.md
+++ b/content/manuals/engine/network/drivers/ipvlan.md
@@ -32,7 +32,7 @@ is no need for port mappings in these scenarios.
 ## Options
 
 The following table describes the driver-specific options that you can pass to
-`--option` when creating a network using the `ipvlan` driver.
+`--opt` when creating a network using the `ipvlan` driver.
 
 | Option        | Default  | Description                                                            |
 | ------------- | -------- | ---------------------------------------------------------------------- |
diff --git a/content/manuals/engine/network/drivers/macvlan.md b/content/manuals/engine/network/drivers/macvlan.md
index 3817c011376a..43e67a7a04e3 100644
--- a/content/manuals/engine/network/drivers/macvlan.md
+++ b/content/manuals/engine/network/drivers/macvlan.md
@@ -35,7 +35,7 @@ Keep the following things in mind:
 ## Options
 
 The following table describes the driver-specific options that you can pass to
-`--option` when creating a network using the `macvlan` driver.
+`--opt` when creating a network using the `macvlan` driver.
 | Option         | Default  | Description                                                                     |
 | -------------- | -------- | ------------------------------------------------------------------------------- |
diff --git a/content/manuals/engine/network/packet-filtering-firewalls.md b/content/manuals/engine/network/packet-filtering-firewalls.md
index b77041965835..72fa993b2e0a 100644
--- a/content/manuals/engine/network/packet-filtering-firewalls.md
+++ b/content/manuals/engine/network/packet-filtering-firewalls.md
@@ -32,17 +32,22 @@ following custom `iptables` chains:
 
 * `DOCKER-USER`
   * A placeholder for user-defined rules that will be processed before rules
-    in the `DOCKER` chain.
+    in the `DOCKER-FORWARD` and `DOCKER` chains.
+* `DOCKER-FORWARD`
+  * The first stage of processing for Docker's networks. Rules that pass
+    packets that are not part of established connections on to the other
+    Docker chains, and rules that accept packets that are part of established
+    connections.
 * `DOCKER`
   * Rules that determine whether a packet that is not part of an established
     connection should be accepted, based on the port forwarding configuration
     of running containers.
 * `DOCKER-ISOLATION-STAGE-1` and `DOCKER-ISOLATION-STAGE-2`
   * Rules to isolate Docker networks from each other.
+* `DOCKER-INGRESS`
+  * Rules related to Swarm networking.
 
-In the `FORWARD` chain, Docker adds rules that pass packets that are not related
-to established connections to these custom chains, as well as rules to accept
-packets that are part of established connections.
+In the `FORWARD` chain, Docker adds rules that unconditionally jump to the
+`DOCKER-USER`, `DOCKER-FORWARD`, and `DOCKER-INGRESS` chains.
 
 In the `nat` table, Docker creates chain `DOCKER` and adds rules to implement
 masquerading and port-mapping.
 
@@ -53,6 +58,8 @@ Packets that get accepted or rejected by rules in these custom chains will not
 be seen by user-defined rules appended to the `FORWARD` chain. So, to add
 additional rules to filter these packets, use the `DOCKER-USER` chain.
 
+Rules appended to the `FORWARD` chain will be processed after Docker's rules.
+
 ### Match the original IP and ports for requests
 
 When packets arrive to the `DOCKER-USER` chain, they have already passed through
@@ -119,6 +126,17 @@ the source and destination. For instance, if the Docker host has addresses
 `2001:db8:1111::2` and `2001:db8:2222::2`, you can make rules specific to
 `2001:db8:1111::2` and leave `2001:db8:2222::2` open.
 
+You might need to allow responses from servers outside the permitted external
+address ranges. For example, containers might send DNS or HTTP requests to
+hosts that are not allowed to access the container's services. The following
+rule accepts any incoming or outgoing packet belonging to a flow that has
+already been accepted by other rules. It must be placed before `DROP` rules
+that restrict access from external address ranges.
+
+```console
+$ iptables -I DOCKER-USER -m state --state RELATED,ESTABLISHED -j ACCEPT
+```
+
 `iptables` is complicated. There is a lot more information at [Netfilter.org HOWTO](https://www.netfilter.org/documentation/HOWTO/NAT-HOWTO.html).
 
 ### Direct routing
 
@@ -129,31 +147,114 @@ clients.
 
 No routes are normally set up in the host's network for container addresses that
 exist within a host.
 
 But, particularly with IPv6 you may prefer to avoid using NAT and instead
-arrange for external routing to container addresses.
+arrange for external routing to container addresses ("direct routing").
 To access containers on a bridge network from outside the Docker host,
-you must set up routing to the bridge network via an address on the Docker
-host. This can be achieved using static routes, Border Gateway Protocol
-(BGP), or any other means appropriate for your network.
+you must first set up routing to the bridge network via an address on the
+Docker host. This can be achieved using static routes, Border Gateway Protocol (BGP),
+or any other means appropriate for your network. For example, within
+a local layer 2 network, remote hosts can set up static routes to a container
+network via the Docker daemon host's address on the local network.
+
+#### Direct routing to containers in bridge networks
+
+By default, remote hosts are not allowed direct access to container IP
+addresses in Docker's Linux bridge networks. They can only access ports
+published to host IP addresses.
+
+To allow direct access to any published port, on any container, in any
+Linux bridge network, use daemon option `"allow-direct-routing": true`
+in `/etc/docker/daemon.json` or the equivalent `--allow-direct-routing`.
+
+To allow direct routing from anywhere to containers in a specific bridge
+network, see [Gateway modes](#gateway-modes).
+
+Or, to allow direct routing via specific host interfaces to a specific
+bridge network, use the following option when creating the network:
+- `com.docker.network.bridge.trusted_host_interfaces`
+
+#### Example
+
+Create a network where published ports on container IP addresses can be
+accessed directly from interfaces `vxlan.1` and `eth3`:
+
+```console
+$ docker network create --subnet 192.0.2.0/24 --ip-range 192.0.2.0/29 -o com.docker.network.bridge.trusted_host_interfaces="vxlan.1:eth3" mynet
+```
+
+Run a container in that network, publishing its port 80 to port 8080 on
+the host's loopback interface:
+
+```console
+$ docker run -d --ip 192.0.2.100 -p 127.0.0.1:8080:80 nginx
+```
+
+The web server running on the container's port 80 can now be accessed
+from the Docker host at `http://127.0.0.1:8080`, or directly at
+`http://192.0.2.100:80`. If remote hosts on networks connected to
+interfaces `vxlan.1` and `eth3` have a route to the `192.0.2.0/24`
+network inside the Docker host, they can also access the web server
+via `http://192.0.2.100:80`.
+
+#### Gateway modes
+
+The bridge network driver has the following options:
+- `com.docker.network.bridge.gateway_mode_ipv6`
+- `com.docker.network.bridge.gateway_mode_ipv4`
 
-The bridge network driver has options
-`com.docker.network.bridge.gateway_mode_ipv6=<mode>` and
-`com.docker.network.bridge.gateway_mode_ipv4=<mode>`.
+Each of these can be set to one of the gateway modes:
+- `nat`
+- `nat-unprotected`
+- `routed`
+- `isolated`
 
-The default is `nat`, NAT and masquerading rules are set up for each
-published container port. With mode `routed`, no NAT or masquerading rules
-are set up, but `iptables` are still set up so that only published container
-ports are accessible.
+The default is `nat`: NAT and masquerading rules are set up for each
+published container port. Packets leaving the host will use a host address.
+
+With mode `routed`, no NAT or masquerading rules are set up, but `iptables`
+are still set up so that only published container ports are accessible.
+Outgoing packets from the container will use the container's address,
+not a host address.
+
+In `nat` mode, when a port is published to a specific host address, that
+port is only accessible via the host interface with that address. So,
+for example, publishing a port to an address on the loopback interface
+means remote hosts cannot access it.
+
+However, using direct routing, published container ports are always
+accessible from remote hosts, unless the Docker host's firewall has
+additional restrictions. Hosts on the local layer-2 network can set up
+direct routing without needing any additional network configuration.
+Hosts outside the local network can only use direct routing to the
+container if the network's routers are configured to enable it.
+
+In `nat-unprotected` mode, unpublished container ports are also
+accessible using direct routing. No port filtering rules are set up.
+This mode is included for compatibility with legacy default behavior.
+
+The gateway mode also affects communication between containers that
+are connected to different Docker networks on the same host.
+- In `nat` and `nat-unprotected` modes, containers in other bridge
+  networks can only access published ports via the host addresses they
+  are published to. Direct routing from other networks is not allowed.
+- In `routed` mode, containers in other networks can use direct
+  routing to access ports, without going via a host address.
 
 In `routed` mode, a host port in a `-p` or `--publish` port mapping is
 not used, and the host address is only used to decide whether to apply
 the mapping to IPv4 or IPv6. So, when a mapping only applies to `routed`
-mode, only addresses `0.0.0.0` or `::1` are allowed, and a host port
-must not be given.
-
-Mapped container ports, in `nat` or `routed` mode, are accessible from
-any remote address, if routing is set up in the network, unless the
-Docker host's firewall has additional restrictions.
+mode, only addresses `0.0.0.0` or `::` should be used, and a host port
+should not be given. If a specific address or port is given, it will
+have no effect on the published port and a warning message will be
+logged.
+
+Mode `isolated` can only be used when the network is also created with
+CLI flag `--internal`, or equivalent. An address is normally assigned to the
+bridge device in an `internal` network. So, processes on the Docker host can
+access the network, and containers in the network can access host services
+listening on that bridge address (including services listening on "any" host
+address, `0.0.0.0` or `::`). No address is assigned to the bridge when the
+network is created with gateway mode `isolated`.
 
 #### Example
 
@@ -169,14 +270,14 @@ $ docker run --network=mynet -p 8080:80 myimage
 ```
 
 Then:
-- Only container port 80 will be open, for IPv4 and IPv6. It is accessible
-  from anywhere, if there is routing to the container's address, and access
-  is not blocked by the host's firewall.
+- Only container port 80 will be open, for IPv4 and IPv6.
 - For IPv6, using `routed` mode, port 80 will be open on the container's IP
   address. Port 8080 will not be opened on the host's IP addresses, and
   outgoing packets will use the container's IP address.
 - For IPv4, using the default `nat` mode, the container's port 80 will be
-  accessible via port 8080 on the host's IP addresses, as well as directly.
+  accessible via port 8080 on the host's IP addresses, as well as directly
+  from within the Docker host. But container port 80 cannot be accessed
+  directly from outside the host.
   Connections originating from the container will masquerade, using the
   host's IP address.
 
@@ -214,9 +315,9 @@ configure the daemon to use the loopback address (`127.0.0.1`) instead.
 > [!WARNING]
 >
-> Hosts within the same L2 segment (for example, hosts connected to the same
-> network switch) can reach ports published to localhost.
-> For more information, see
+> In releases older than 28.0.0, hosts within the same L2 segment (for example,
+> hosts connected to the same network switch) can reach ports published to
+> localhost. For more information, see
 > [moby/moby#45610](https://github.com/moby/moby/issues/45610)
 
 To configure this setting for user-defined bridge networks, use
@@ -254,16 +355,36 @@ Alternatively, you can use the `dockerd --ip` flag when starting the daemon.
 
 ## Docker on a router
 
-Docker sets the policy for the `FORWARD` chain to `DROP`. This will prevent
-your Docker host from acting as a router.
+On Linux, Docker needs IP forwarding enabled on the host. So, it enables
+the `sysctl` settings `net.ipv4.ip_forward` and `net.ipv6.conf.all.forwarding`
+if they are not already enabled when it starts. When it does that, it also
+sets the policy of the iptables `FORWARD` chain to `DROP`.
+
+When Docker sets the policy for the `FORWARD` chain to `DROP`, your Docker
+host can't act as a router. This is the recommended setting when IP
+forwarding is enabled.
 
-If you want your system to function as a router, you must add explicit
-`ACCEPT` rules to the `DOCKER-USER` chain. For example:
+To stop Docker from setting the `FORWARD` chain's policy to `DROP`, include
+`"ip-forward-no-drop": true` in `/etc/docker/daemon.json`, or add option
+`--ip-forward-no-drop` to the `dockerd` command line.
+
+Alternatively, you can add `ACCEPT` rules to the `DOCKER-USER` chain for the
+packets you want to forward. For example:
 
 ```console
 $ iptables -I DOCKER-USER -i src_if -o dst_if -j ACCEPT
 ```
 
+> [!WARNING]
+>
+> In releases older than 28.0.0, Docker always set the default policy of the
+> IPv6 `FORWARD` chain to `DROP`. In release 28.0.0 and newer, it will only
+> set that policy if it enables IPv6 forwarding itself. This has always been
+> the behavior for IPv4 forwarding.
+>
+> If IPv6 forwarding is enabled on your host before Docker starts, check your
+> host's configuration to make sure it is still secure.
+
 ## Prevent Docker from manipulating iptables
 
 It is possible to set the `iptables` or `ip6tables` keys to `false` in
diff --git a/content/manuals/engine/network/tutorials/standalone.md b/content/manuals/engine/network/tutorials/standalone.md
index f9ee9490bce8..3e4c4a09d2a8 100644
--- a/content/manuals/engine/network/tutorials/standalone.md
+++ b/content/manuals/engine/network/tutorials/standalone.md
@@ -9,7 +9,7 @@ aliases:
 
 This series of tutorials deals with networking for standalone Docker
 containers. For networking with swarm services, see
 [Networking with swarm services](/manuals/engine/network/tutorials/overlay.md). If you need to
-learn more about Docker networking in general, see the [overview](_index.md).
+learn more about Docker networking in general, see the [overview](/manuals/engine/network/_index.md).
 
 This topic includes two different tutorials.
You can run each of them on Linux, Windows, or a Mac, but for the last one, you need a second Docker diff --git a/content/manuals/engine/release-notes/27.md b/content/manuals/engine/release-notes/27.md index 9761f4edcf8b..a90a7b73dc70 100644 --- a/content/manuals/engine/release-notes/27.md +++ b/content/manuals/engine/release-notes/27.md @@ -8,10 +8,6 @@ toc_max: 2 tags: - Release notes aliases: -- /engine/release-notes/ -- /engine/release-notes/latest/ -- /release-notes/docker-ce/ -- /release-notes/docker-engine/ - /engine/release-notes/27.1/ - /engine/release-notes/27.0/ --- @@ -27,7 +23,7 @@ For more information about: Release notes for Docker Engine version 27.5 releases. -## 27.5.1 +### 27.5.1 {{< release-date date="2025-01-22" >}} @@ -37,12 +33,12 @@ For a full list of pull requests and changes in this release, refer to the relev - [moby/moby, 27.5.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A27.5.1) -### Bug fixes and enhancements +#### Bug fixes and enhancements - Fix an issue that could persistently prevent daemon startup after failure to initialize the default bridge. [moby/moby#49307](https://github.com/moby/moby/pull/49307) - Add a `DOCKER_IGNORE_BR_NETFILTER_ERROR` environment variable. Setting it to `1` allows running on hosts that cannot load `br_netfilter`. Some things won't work, including disabling inter-container communication in a bridge network. With the userland proxy disabled, it won't be possible to access one container's published ports from another container on the same network. [moby/moby#49306](https://github.com/moby/moby/pull/49306) -### Packaging updates +#### Packaging updates - Update Go runtime to 1.22.11 (fix CVE-2024-45341, CVE-2024-45336). [moby/moby#49312](https://github.com/moby/moby/pull/49312), [docker/docker-ce-packaging#1147](https://github.com/docker/docker-ce-packaging/pull/1147), [docker/cli#5762](https://github.com/docker/cli/pull/5762) - Update RootlessKit to v2.3.2 to support `passt` >= 2024_10_30.ee7d0b6. [moby/moby#49304](https://github.com/moby/moby/pull/49304) diff --git a/content/manuals/engine/release-notes/28.md b/content/manuals/engine/release-notes/28.md new file mode 100644 index 000000000000..69dd00cf58e3 --- /dev/null +++ b/content/manuals/engine/release-notes/28.md @@ -0,0 +1,725 @@ +--- +title: Docker Engine version 28 release notes +linkTitle: Engine v28 +description: Learn about the new features, bug fixes, and breaking changes for Docker Engine +keywords: docker, docker engine, ce, whats new, release notes +toc_min: 1 +toc_max: 2 +tags: + - Release notes +aliases: +- /engine/release-notes/ +- /engine/release-notes/latest/ +- /release-notes/docker-ce/ +- /release-notes/docker-engine/ +- /engine/release-notes/28.0/ +- /engine/release-notes/28.1/ +--- + +This page describes the latest changes, additions, known issues, and fixes for Docker Engine version 28. + +For more information about: + +- Deprecated and removed features, see [Deprecated Engine Features](../deprecated.md). +- Changes to the Engine API, see [Engine API version history](/reference/api/engine/version-history.md). 
+
+## 28.3.2
+
+{{< release-date date="2025-07-09" >}}
+
+For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones:
+
+- [docker/cli, 28.3.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.2)
+- [moby/moby, 28.3.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.2)
+
+### Bug fixes and enhancements
+
+- Fix `--use-api-socket` not working correctly when targeting a remote daemon. [docker/cli#6157](https://github.com/docker/cli/pull/6157)
+- Fix stray "otel error" logs being printed if debug logging is enabled. [docker/cli#6160](https://github.com/docker/cli/pull/6160)
+- Quote SSH arguments when connecting to a remote daemon over an SSH connection to avoid unexpected expansion. [docker/cli#6147](https://github.com/docker/cli/pull/6147)
+- Warn when `DOCKER_AUTH_CONFIG` is set during `docker login` and `docker logout`. [docker/cli#6163](https://github.com/docker/cli/pull/6163)
+
+### Packaging updates
+
+- Update Compose to [v2.38.2](https://github.com/docker/compose/releases/tag/v2.38.2). [docker/docker-ce-packaging#1225](https://github.com/docker/docker-ce-packaging/pull/1225)
+- Update Docker Model CLI plugin to [v0.1.33](https://github.com/docker/model-cli/releases/tag/v0.1.33). [docker/docker-ce-packaging#1227](https://github.com/docker/docker-ce-packaging/pull/1227)
+- Update Go runtime to 1.24.5. [moby/moby#50354](https://github.com/moby/moby/pull/50354)
+
+## 28.3.1
+
+{{< release-date date="2025-07-02" >}}
+
+For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones:
+
+- [docker/cli, 28.3.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.1)
+- [moby/moby, 28.3.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.1)
+
+### Packaging updates
+
+- Update BuildKit to [v0.23.2](https://github.com/moby/buildkit/releases/tag/v0.23.2). [moby/moby#50309](https://github.com/moby/moby/pull/50309)
+- Update Compose to [v2.38.1](https://github.com/docker/compose/releases/tag/v2.38.1). [docker/docker-ce-packaging#1221](https://github.com/docker/docker-ce-packaging/pull/1221)
+- Update Model to v0.1.32, which adds support for the new top-level `models:` key in Docker Compose. [docker/docker-ce-packaging#1222](https://github.com/docker/docker-ce-packaging/pull/1222)
+
+## 28.3.0
+
+{{< release-date date="2025-06-24" >}}
+
+For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones:
+
+- [docker/cli, 28.3.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.3.0)
+- [moby/moby, 28.3.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.3.0)
+
+### New
+
+- Add support for AMD GPUs in `docker run --gpus`. [moby/moby#49952](https://github.com/moby/moby/pull/49952)
+- Use `DOCKER_AUTH_CONFIG` as a credential store. [docker/cli#6008](https://github.com/docker/cli/pull/6008)
+
+### Bug fixes and enhancements
+
+- Ensure that the state of the container in the daemon database (used by the [/containers/json](https://docs.docker.com/reference/api/engine/version/v1.49/#tag/Container/operation/ContainerList) API) is up to date when the container is stopped using the [/containers/{id}/stop](https://docs.docker.com/reference/api/engine/version/v1.49/#tag/Container/operation/ContainerStop) API, before the API response is returned.
[moby/moby#50136](https://github.com/moby/moby/pull/50136) +- Fix `docker image inspect` omitting empty fields. [moby/moby#50135](https://github.com/moby/moby/pull/50135) +- Fix `docker images --tree` not marking images as in-use when the containerd image store is disabled. [docker/cli#6140](https://github.com/docker/cli/pull/6140) +- Fix `docker pull/push` hanging in non-interactive mode when authentication is required, caused by prompting for login credentials. [docker/cli#6141](https://github.com/docker/cli/pull/6141) +- Fix a potential resource leak when a node leaves a Swarm. [moby/moby#50115](https://github.com/moby/moby/pull/50115) +- Fix a regression where a login prompt on `docker pull` would show Docker Hub-specific hints when logging in to other registries. [docker/cli#6135](https://github.com/docker/cli/pull/6135) +- Fix an issue where all new tasks in the Swarm could get stuck in the PENDING state forever after scaling up a service with placement preferences. [moby/moby#50211](https://github.com/moby/moby/pull/50211) +- Remove an undocumented, hidden, top-level `docker remove` command that was accidentally introduced in Docker 23.0. [docker/cli#6144](https://github.com/docker/cli/pull/6144) +- Validate registry-mirrors configuration as part of `dockerd --validate` and improve error messages for invalid mirrors. [moby/moby#50240](https://github.com/moby/moby/pull/50240) +- `dockerd-rootless-setuptool.sh`: Fix the script silently returning with no error message when subuid/subgid system requirements are not satisfied. [moby/moby#50059](https://github.com/moby/moby/pull/50059) +- containerd image store: Fix `docker push` not creating a tag on the remote repository. [moby/moby#50199](https://github.com/moby/moby/pull/50199) +- containerd image store: Improve handling of errors returned by the token server during `docker pull/push`. [moby/moby#50176](https://github.com/moby/moby/pull/50176) + +### Packaging updates + +- Allow customizing the containerd service name for OpenRC. [moby/moby#50156](https://github.com/moby/moby/pull/50156) +- Update BuildKit to [v0.23.1](https://github.com/moby/buildkit/releases/tag/v0.23.1). [moby/moby#50243](https://github.com/moby/moby/pull/50243) +- Update Buildx to [v0.25.0](https://github.com/docker/buildx/releases/tag/v0.25.0). [docker/docker-ce-packaging#1217](https://github.com/docker/docker-ce-packaging/pull/1217) +- Update Compose to [v2.37.2](https://github.com/docker/compose/releases/tag/v2.37.2). [docker/docker-ce-packaging#1219](https://github.com/docker/docker-ce-packaging/pull/1219) +- Update Docker Model CLI plugin to [v0.1.30](https://github.com/docker/model-cli/releases/tag/v0.1.30). [docker/docker-ce-packaging#1218](https://github.com/docker/docker-ce-packaging/pull/1218) +- Update Go runtime to [1.24.4](https://go.dev/doc/devel/release#go1.24.4). [docker/docker-ce-packaging#1213](https://github.com/docker/docker-ce-packaging/pull/1213), [moby/moby#50153](https://github.com/moby/moby/pull/50153), [docker/cli#6124](https://github.com/docker/cli/pull/6124) + +### Networking + +- Revert Swarm-related changes added in 28.2.x builds, due to a regression reported in https://github.com/moby/moby/issues/50129. [moby/moby#50169](https://github.com/moby/moby/pull/50169) + * Revert: Fix an issue where `docker network inspect --verbose` could sometimes crash the daemon (https://github.com/moby/moby/pull/49937).
+ * Revert: Fix an issue where the load-balancer IP address for an overlay network would not be released in certain cases if the Swarm was lacking an ingress network (https://github.com/moby/moby/pull/49948). + * Revert: Improve the reliability of NetworkDB in busy clusters and lossy networks (https://github.com/moby/moby/pull/49932). + * Revert: Improvements to the reliability and convergence speed of NetworkDB (https://github.com/moby/moby/pull/49939). +- Fix an issue that could cause container startup to fail, or lead to failed UDP port mappings, when some container ports are mapped to `0.0.0.0` and others are mapped to specific host addresses. [moby/moby#50054](https://github.com/moby/moby/pull/50054) +- The `network inspect` response for an overlay network now reports that `EnableIPv4` is true. [moby/moby#50147](https://github.com/moby/moby/pull/50147) +- Windows: Improve daemon startup time in cases where the host has networks of type `"Mirrored"`. [moby/moby#50155](https://github.com/moby/moby/pull/50155) +- Windows: Make sure `docker system prune` and `docker network prune` only remove networks created by Docker. [moby/moby#50154](https://github.com/moby/moby/pull/50154) + +### API + +- Update API version to 1.51. [moby/moby#50145](https://github.com/moby/moby/pull/50145) +- `GET /images/json` now sets the value of the `Containers` field for all images to the count of containers using the image. [moby/moby#50146](https://github.com/moby/moby/pull/50146) + +### Deprecations + +- Empty or nil image config fields in the `GET /images/{name}/json` response are now deprecated and will be removed in v29.0. [docker/cli#6129](https://github.com/docker/cli/pull/6129) +- `api/types/container`: deprecate `ExecOptions.Detach`. This field is not used, and will be removed in a future release. [moby/moby#50219](https://github.com/moby/moby/pull/50219) +- `pkg/idtools`: deprecate `IdentityMapping` and `Identity.Chown`. [moby/moby#50210](https://github.com/moby/moby/pull/50210) + +## 28.2.2 + +{{< release-date date="2025-05-30" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.2.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.2.2) +- [moby/moby, 28.2.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.2.2) + +### Bug fixes and enhancements + +- containerd image store: Fix a regression causing `docker build --push` to fail. This reverts [the fix](https://github.com/moby/moby/pull/49702) for `docker build` not persisting overridden images as dangling. [moby/moby#50105](https://github.com/moby/moby/pull/50105) + +### Networking + +- When creating the iptables `DOCKER-USER` chain, do not add an explicit `RETURN` rule, allowing users to append as well as insert their own rules. Existing rules are not removed on upgrade, but the `RETURN` rule won't be re-created after a reboot. A sketch of appending a rule follows the 28.2.1 notes below. [moby/moby#50098](https://github.com/moby/moby/pull/50098) + +## 28.2.1 + +{{< release-date date="2025-05-29" >}} + +### Packaging updates + +- Fix packaging regression in [v28.2.0](https://github.com/moby/moby/releases/tag/v28.2.0) which broke creating the `docker` group and user on fresh installations. [docker-ce-packaging#1209](https://github.com/docker/docker-ce-packaging/issues/1209)
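+
+As context for the `DOCKER-USER` change in 28.2.2 above, a minimal sketch of combining inserted and appended rules; the interface `eth0` and the address range are placeholders:
+
+```console
+# Rules inserted at the top of DOCKER-USER are evaluated first, as before.
+$ sudo iptables -I DOCKER-USER -s 198.51.100.0/24 -j DROP
+
+# Without the explicit RETURN rule, appended rules are now also evaluated.
+$ sudo iptables -A DOCKER-USER -i eth0 -p tcp --dport 5432 -j DROP
+```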
+ +## 28.2.0 + +{{< release-date date="2025-05-28" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.2.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.2.0) +- [moby/moby, 28.2.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.2.0) + +> [!NOTE] +> RHEL packages are currently not available and will be released later. + +### New + +- Add `{{.Platform}}` as a formatting option for `docker ps` to show the platform of the image the container is running. [docker/cli#6042](https://github.com/docker/cli/pull/6042) +- Add support for relative parent paths (`../`) on bind mount sources when using `docker run/create` with `-v/--volume` or `--mount type=bind` options. [docker/cli#4966](https://github.com/docker/cli/pull/4966) +- CDI is now enabled by default. [moby/moby#49963](https://github.com/moby/moby/pull/49963) +- Show discovered CDI devices in `docker info`. [docker/cli#6078](https://github.com/docker/cli/pull/6078) +- `docker image rm`: add `--platform` option to remove a variant from multi-platform images. [docker/cli#6109](https://github.com/docker/cli/pull/6109) +- containerd image store: Initial BuildKit support for building Windows container images on Windows (requires an opt-in with `DOCKER_BUILDKIT=1`). [moby/moby#49740](https://github.com/moby/moby/pull/49740) + +### Bug fixes and enhancements + +- Add a new log option for the fluentd log driver (`fluentd-write-timeout`), which enables specifying write timeouts for fluentd connections. [moby/moby#49911](https://github.com/moby/moby/pull/49911) +- Add support for `DOCKER_AUTH_CONFIG` for the experimental `--use-api-socket` option. [docker/cli#6019](https://github.com/docker/cli/pull/6019) +- Fix `docker exec` waiting for 10 seconds if a non-existing user or group was specified. [moby/moby#49868](https://github.com/moby/moby/pull/49868) +- Fix `docker swarm init` ignoring the `cacert` option of `--external-ca`. [docker/cli#5995](https://github.com/docker/cli/pull/5995) +- Fix an issue where the CLI would not correctly save the configuration file (`~/.docker/config.json`) if it was a relative symbolic link. [docker/cli#5282](https://github.com/docker/cli/pull/5282) +- Fix containers with a `--restart always` policy using CDI devices failing to start on daemon restart. [moby/moby#49990](https://github.com/moby/moby/pull/49990) +- Fix shell completion to only complete some flags once, even though they can be set multiple times. [docker/cli#6030](https://github.com/docker/cli/pull/6030) +- Fix the `plugin does not implement PluginAddr interface` error for Swarm CSI drivers. [moby/moby#49961](https://github.com/moby/moby/pull/49961) +- Improve `docker login` error messages for invalid options. [docker/cli#6036](https://github.com/docker/cli/pull/6036) +- Make sure the terminal state is restored if the CLI is forcefully terminated. [docker/cli#6058](https://github.com/docker/cli/pull/6058) +- Update the default seccomp profile to match libseccomp v2.6.0. The new syscalls are: `listmount`, `statmount`, `lsm_get_self_attr`, `lsm_list_modules`, `lsm_set_self_attr`, `mseal`, `uretprobe`, `riscv_hwprobe`, `getxattrat`, `listxattrat`, `removexattrat`, and `setxattrat`. This prevents containers from receiving EPERM errors when using them.
[moby/moby#50077](https://github.com/moby/moby/pull/50077) +- `docker inspect`: add shell completion, improve the flag description for `--type`, and improve validation. [docker/cli#6052](https://github.com/docker/cli/pull/6052) +- containerd image store: Enable the BuildKit garbage collector by default. [moby/moby#49899](https://github.com/moby/moby/pull/49899) +- containerd image store: Fix `docker build` not persisting overridden images as dangling. [moby/moby#49702](https://github.com/moby/moby/pull/49702) +- containerd image store: Fix `docker system df` reporting a negative reclaimable space amount. [moby/moby#49707](https://github.com/moby/moby/pull/49707) +- containerd image store: Fix duplicate `PUT` requests when pushing a multi-platform image. [moby/moby#49949](https://github.com/moby/moby/pull/49949) + +### Packaging updates + +- Drop Ubuntu 20.04 "Focal" packages as it has reached end of life. [docker/docker-ce-packaging#1200](https://github.com/docker/docker-ce-packaging/pull/1200) +- Fix install location for RPM-based `docker-ce` man-pages. [docker/docker-ce-packaging#1203](https://github.com/docker/docker-ce-packaging/pull/1203) +- Update BuildKit to [v0.22.0](https://github.com/moby/buildkit/releases/tag/v0.22.0). [moby/moby#50046](https://github.com/moby/moby/pull/50046) +- Update Buildx to [v0.24.0](https://github.com/docker/buildx/releases/tag/v0.24.0). [docker/docker-ce-packaging#1205](https://github.com/docker/docker-ce-packaging/pull/1205) +- Update Compose to [v2.36.2](https://github.com/docker/compose/releases/tag/v2.36.2). [docker/docker-ce-packaging#1208](https://github.com/docker/docker-ce-packaging/pull/1208) +- Update Go runtime to [1.24.3](https://go.dev/doc/devel/release#go1.24.3). [docker/docker-ce-packaging#1192](https://github.com/docker/docker-ce-packaging/pull/1192), [docker/cli#6060](https://github.com/docker/cli/pull/6060), [moby/moby#49174](https://github.com/moby/moby/pull/49174) + +### Networking + +- Add bridge network option `"com.docker.network.bridge.trusted_host_interfaces"`, accepting a colon-separated list of interface names. These interfaces have direct access to published ports on container IP addresses. See the example after this list. [moby/moby#49832](https://github.com/moby/moby/pull/49832) +- Add daemon option `"allow-direct-routing"` to disable filtering of packets from outside the host addressed directly to containers. [moby/moby#49832](https://github.com/moby/moby/pull/49832) +- Do not display network options `com.docker.network.enable_ipv4` or `com.docker.network.enable_ipv6` in inspect output if they have been overridden by `EnableIPv4` or `EnableIPv6` in the network create request. [moby/moby#49866](https://github.com/moby/moby/pull/49866) +- Fix an issue that could cause network deletion to fail after a daemon restart, with error "has active endpoints" listing empty endpoint names. [moby/moby#49901](https://github.com/moby/moby/pull/49901) +- Fix an issue where `docker network inspect --verbose` could sometimes crash the daemon. [moby/moby#49937](https://github.com/moby/moby/pull/49937) +- Fix an issue where the load-balancer IP address for an overlay network would not be released in certain cases if the Swarm was lacking an ingress network. [moby/moby#49948](https://github.com/moby/moby/pull/49948) +- Improve the reliability of NetworkDB in busy clusters and lossy networks. [moby/moby#49932](https://github.com/moby/moby/pull/49932) +- Improvements to the reliability and convergence speed of NetworkDB. [moby/moby#49939](https://github.com/moby/moby/pull/49939)
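+
+As a sketch of the `trusted_host_interfaces` bridge option above, assuming host interfaces named `eth1` and `eth2` (placeholders):
+
+```console
+$ docker network create \
+    -o com.docker.network.bridge.trusted_host_interfaces=eth1:eth2 \
+    mybridge
+```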
+ +### API + +- `DELETE /images/{name}` now supports a `platforms` query parameter. It accepts an array of JSON-encoded OCI Platform objects, allowing you to select specific platforms to delete content for. [moby/moby#49982](https://github.com/moby/moby/pull/49982) +- `GET /info` now includes a `DiscoveredDevices` field. This is an array of `DeviceInfo` objects, each providing details about a device discovered by a device driver. [moby/moby#49980](https://github.com/moby/moby/pull/49980) + +### Go SDK + +- `api/types/container`: add `ContainerState` and constants for container state. [moby/moby#49965](https://github.com/moby/moby/pull/49965) +- `api/types/container`: change `Summary.State` to a `ContainerState`. [moby/moby#49991](https://github.com/moby/moby/pull/49991) +- `api/types/container`: define `HealthStatus` type for health-status constants. [moby/moby#49876](https://github.com/moby/moby/pull/49876) +- `api/types`: deprecate `BuildResult`, `ImageBuildOptions`, `ImageBuildOutput`, `ImageBuildResponse`, `BuilderVersion`, `BuilderV1`, and `BuilderBuildKit`, which were moved to `api/types/build`. [moby/moby#50025](https://github.com/moby/moby/pull/50025) + +### Deprecations + +- API: Deprecated: `GET /images/{name}/json` no longer returns the following fields: `Config`, `Hostname`, `Domainname`, `AttachStdin`, `AttachStdout`, `AttachStderr`, `Tty`, `OpenStdin`, `StdinOnce`, `Image`, `NetworkDisabled` (already omitted unless set), `MacAddress` (already omitted unless set), `StopTimeout` (already omitted unless set). These additional fields were included in the response due to an implementation detail, are not part of the image's configuration, were marked deprecated in API v1.46, and are now omitted. [moby/moby#48457](https://github.com/moby/moby/pull/48457) +- Go-SDK: Deprecate `builder/remotecontext.Rel()`. This function was needed on older versions of Go, but can now be replaced by `filepath.Rel()`. [moby/moby#49843](https://github.com/moby/moby/pull/49843) +- Go-SDK: api/types: deprecate `BuildCachePruneOptions` in favor of `api/types/builder.CachePruneOptions`. [moby/moby#50015](https://github.com/moby/moby/pull/50015) +- Go-SDK: api/types: deprecate `BuildCachePruneReport` in favor of `api/types/builder.CachePruneReport`. [moby/moby#50015](https://github.com/moby/moby/pull/50015) +- Go-SDK: api/types: deprecate `NodeListOptions`, `NodeRemoveOptions`, `ServiceCreateOptions`, `ServiceUpdateOptions`, `RegistryAuthFromSpec`, `RegistryAuthFromPreviousSpec`, `ServiceListOptions`, `ServiceInspectOptions`, and `SwarmUnlockKeyResponse` which were moved to `api/types/swarm`. [moby/moby#50027](https://github.com/moby/moby/pull/50027) +- Go-SDK: api/types: deprecate `SecretCreateResponse`, `SecretListOptions`, `ConfigCreateResponse`, and `ConfigListOptions`, which were moved to `api/types/swarm`. [moby/moby#50024](https://github.com/moby/moby/pull/50024) +- Go-SDK: client: deprecate `IsErrNotFound`. [moby/moby#50012](https://github.com/moby/moby/pull/50012) +- Go-SDK: container: deprecate `IsValidHealthString` in favor of `api/types/container.ValidateHealthStatus`. [moby/moby#49893](https://github.com/moby/moby/pull/49893) +- Go-SDK: container: deprecate `StateStatus`, `WaitCondition`, and the related `WaitConditionNotRunning`, `WaitConditionNextExit`, and `WaitConditionRemoved` consts in favor of their equivalents in `api/types/container`.
[moby/moby#49874](https://github.com/moby/moby/pull/49874) +- Go-SDK: opts: deprecate `ListOpts.GetAll` in favor of `ListOpts.GetSlice`. [docker/cli#6032](https://github.com/docker/cli/pull/6032) +- Remove the deprecated `IsAutomated` formatting placeholder from `docker search`. [docker/cli#6091](https://github.com/docker/cli/pull/6091) +- Remove fallback for pulling images from the non-OCI-compliant `docker.pkg.github.com` registry. [moby/moby#50094](https://github.com/moby/moby/pull/50094) +- Remove support for pulling legacy v2, schema 1 images and remove the `DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE` environment variable. [moby/moby#50036](https://github.com/moby/moby/pull/50036), [moby/moby#42300](https://github.com/moby/moby/pull/42300) +- The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the `GET /info` response were deprecated in API v1.48, and are now omitted in API v1.50. [moby/moby#49904](https://github.com/moby/moby/pull/49904) +- errdefs: Deprecate `errdefs.FromStatusCode`. Use containerd's `errhttp.ToNative` instead. [moby/moby#50030](https://github.com/moby/moby/pull/50030) + +## 28.1.1 + +{{< release-date date="2025-04-18" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.1.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.1.1) +- [moby/moby, 28.1.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.1.1) + +### Bug fixes and enhancements + +- Fix `dockerd-rootless-setuptool.sh` incorrectly reporting missing `iptables`. [moby/moby#49833](https://github.com/moby/moby/pull/49833) +- containerd image store: Fix a potential daemon crash when using `docker load` with archives containing zero-size tar headers. [moby/moby#49837](https://github.com/moby/moby/pull/49837) + +### Packaging updates + +- Update Buildx to [v0.23.0](https://github.com/docker/buildx/releases/tag/v0.23.0). [docker/docker-ce-packaging#1185](https://github.com/docker/docker-ce-packaging/pull/1185) +- Update Compose to [v2.35.1](https://github.com/docker/compose/releases/tag/v2.35.1). [docker/docker-ce-packaging#1188](https://github.com/docker/docker-ce-packaging/pull/1188) + +### Networking + +- Add a warning to a container's `/etc/resolv.conf` when no upstream DNS servers were found. [moby/moby#49827](https://github.com/moby/moby/pull/49827) + +## 28.1.0 + +{{< release-date date="2025-04-17" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.1.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.1.0) +- [moby/moby, 28.1.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.1.0) + +### New + +- Add `docker bake` sub-command as an alias for `docker buildx bake`. [docker/cli#5947](https://github.com/docker/cli/pull/5947) +- Experimental: add a new `--use-api-socket` flag on `docker run` and `docker create` to enable access to the Docker socket from inside a container and to share credentials from the host with the container. See the sketch after the Networking list below. [docker/cli#5858](https://github.com/docker/cli/pull/5858) +- `docker image inspect` now supports a `--platform` flag to inspect a specific platform of a multi-platform image. See the example at the end of the 28.1.0 notes. [docker/cli#5934](https://github.com/docker/cli/pull/5934) + +### Bug fixes and enhancements + +- Add CLI shell-completion for context names.
[docker/cli#6016](https://github.com/docker/cli/pull/6016) +- Fix `docker images --tree` not including the content size of non-container images in the total image content size. [docker/cli#6000](https://github.com/docker/cli/pull/6000) +- Fix `docker load` not preserving replaced images. [moby/moby#49650](https://github.com/moby/moby/pull/49650) +- Fix `docker login` hints when logging in to a custom registry. [docker/cli#6015](https://github.com/docker/cli/pull/6015) +- Fix `docker stats` not working properly on machines with a high CPU core count. [moby/moby#49734](https://github.com/moby/moby/pull/49734) +- Fix a regression causing `docker pull/push` to fail when interacting with a private repository. [docker/cli#5964](https://github.com/docker/cli/pull/5964) +- Fix an issue preventing rootless Docker setup on a host with no `ip_tables` kernel module. [moby/moby#49727](https://github.com/moby/moby/pull/49727) +- Fix an issue that could lead to unwanted iptables rules being restored and never deleted following a firewalld reload. [moby/moby#49728](https://github.com/moby/moby/pull/49728) +- Improve CLI completion of `docker service scale`. [docker/cli#5968](https://github.com/docker/cli/pull/5968) +- `docker images --tree` now hides both untagged and dangling images by default. [docker/cli#5924](https://github.com/docker/cli/pull/5924) +- `docker system info` now returns a non-zero exit code if a connection cannot be established to the Docker daemon. [docker/cli#5918](https://github.com/docker/cli/pull/5918) +- containerd image store: Fix `image tag` event not being emitted when building with BuildKit. [moby/moby#49678](https://github.com/moby/moby/pull/49678) +- containerd image store: Improve `docker push/pull` handling of remote registry errors. [moby/moby#49770](https://github.com/moby/moby/pull/49770) +- containerd image store: Show pull progress for non-layer image blobs. [moby/moby#49746](https://github.com/moby/moby/pull/49746) + +### Packaging updates + +- Add Debian "Trixie" packages. [docker/docker-ce-packaging#1181](https://github.com/docker/docker-ce-packaging/pull/1181) +- Add Fedora 42 packages. [docker/containerd-packaging#418](https://github.com/docker/containerd-packaging/pull/418), [docker/docker-ce-packaging#1169](https://github.com/docker/docker-ce-packaging/pull/1169) +- Add Ubuntu 25.04 "Plucky Puffin" packages. [docker/containerd-packaging#419](https://github.com/docker/containerd-packaging/pull/419), [docker/docker-ce-packaging#1177](https://github.com/docker/docker-ce-packaging/pull/1177) +- Update BuildKit to [v0.21.0](https://github.com/moby/buildkit/releases/tag/v0.21.0). [moby/moby#49809](https://github.com/moby/moby/pull/49809) +- Update Compose to [v2.35.0](https://github.com/docker/compose/releases/tag/v2.35.0). [docker/docker-ce-packaging#1183](https://github.com/docker/docker-ce-packaging/pull/1183) +- Update Go runtime to [1.23.8](https://go.dev/doc/devel/release#go1.23.8). [docker/cli#5986](https://github.com/docker/cli/pull/5986), [docker/docker-ce-packaging#1180](https://github.com/docker/docker-ce-packaging/pull/1180), [moby/moby#49737](https://github.com/moby/moby/pull/49737) + +### Networking + +- Fix a bug causing host port-mappings on Swarm containers to be duplicated on `docker ps` and `docker inspect`. [moby/moby#49724](https://github.com/moby/moby/pull/49724) +- Fix an issue that caused container network attachment to fail with error "Bridge port not forwarding". [moby/moby#49705](https://github.com/moby/moby/pull/49705) +- Fix an issue with removal of a `--link` from a container in the default bridge network. [moby/moby#49778](https://github.com/moby/moby/pull/49778) +- Improve how network-endpoint relationships are tracked to reduce the chance of the "has active endpoints" error being wrongly returned. [moby/moby#49736](https://github.com/moby/moby/pull/49736) +- Improve the "has active endpoints" error message by including the name of endpoints still connected to the network being deleted. [moby/moby#49773](https://github.com/moby/moby/pull/49773)
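+
+A minimal sketch of the experimental `--use-api-socket` flag introduced in this release, assuming the `docker:cli` image as a convenient client (any image with a Docker CLI works):
+
+```console
+$ docker run --rm --use-api-socket docker:cli docker ps
+```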
+ +### API + +- Update API version to [v1.49](https://docs.docker.com/engine/api/v1.49/). [moby/moby#49718](https://github.com/moby/moby/pull/49718) +- `GET /images/{name}/json` now supports a `platform` parameter allowing you to specify which platform variant of a multi-platform image to inspect. [moby/moby#49586](https://github.com/moby/moby/pull/49586) +- `GET /info` now returns a `FirewallBackend` containing information about the daemon's firewalling configuration. [moby/moby#49761](https://github.com/moby/moby/pull/49761) + +### Go SDK + +- Update minimum required Go version to go1.23. [docker/cli#5868](https://github.com/docker/cli/pull/5868) +- cli/command/context: remove temporary `ContextType` field from JSON output. [docker/cli#5981](https://github.com/docker/cli/pull/5981) +- client: Keep image references in canonical format where possible. [moby/moby#49609](https://github.com/moby/moby/pull/49609) + +### Deprecations + +- API: The deprecated `AllowNondistributableArtifactsCIDRs` and `AllowNondistributableArtifactsHostnames` fields in the `RegistryConfig` struct in the `GET /info` response are now omitted in API v1.49. [moby/moby#49749](https://github.com/moby/moby/pull/49749) +- API: Deprecated: The `ContainerdCommit.Expected`, `RuncCommit.Expected`, and `InitCommit.Expected` fields in the `GET /info` endpoint were deprecated in API v1.48, and are now omitted in API v1.49. [moby/moby#48556](https://github.com/moby/moby/pull/48556) +- Go-SDK: cli/command/image: Deprecate `RunPull`: this function was only used internally and will be removed in the next release. [docker/cli#5975](https://github.com/docker/cli/pull/5975) +- Go-SDK: cli/config/configfile: deprecate the `ConfigFile.Experimental` field. Experimental CLI features are always enabled since v20.10, and this field is no longer used. Use `ConfigFile.Features` instead for optional features. This field will be removed in a future release. [docker/cli#5977](https://github.com/docker/cli/pull/5977) +- Go-SDK: deprecate `pkg/archive`, which was migrated to `github.com/moby/go-archive`. [moby/moby#49743](https://github.com/moby/moby/pull/49743) +- Go-SDK: deprecate `pkg/atomicwriter`, which was migrated to `github.com/moby/sys/atomicwriter`. [moby/moby#49748](https://github.com/moby/moby/pull/49748) +- Go-SDK: opts: remove deprecated `PortOpt`, `ConfigOpt`, and `SecretOpt` aliases. [docker/cli#5953](https://github.com/docker/cli/pull/5953) +- Go-SDK: registry: deprecate `APIEndpoint.Official` field. [moby/moby#49706](https://github.com/moby/moby/pull/49706)
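+
+To illustrate the platform-aware inspection added in 28.1.0, a minimal sketch, assuming a multi-platform `alpine` image is available locally:
+
+```console
+$ docker image inspect --platform linux/arm64 alpine
+```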
+ +## 28.0.4 + +{{< release-date date="2025-03-25" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.0.4 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.0.4) +- [moby/moby, 28.0.4 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.0.4) + +### Bug fixes and enhancements + +- Fix a regression causing `docker pull/push` to fail when interacting with a private repository. [docker/cli#5964](https://github.com/docker/cli/pull/5964) + + +## 28.0.3 + +{{< release-date date="2025-03-25" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.0.3 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.0.3) +- [moby/moby, 28.0.3 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.0.3) + +### Bug fixes and enhancements + +- Fix `docker run` truncating `STDOUT`/`STDERR` prematurely when the container exits before the data is consumed. [docker/cli#5957](https://github.com/docker/cli/pull/5957) + +### Packaging updates + +- Update BuildKit to [v0.20.2](https://github.com/moby/buildkit/releases/tag/v0.20.2). [moby/moby#49698](https://github.com/moby/moby/pull/49698) +- Update `runc` to [v1.2.6](https://github.com/opencontainers/runc/releases/tag/v1.2.6). [moby/moby#49682](https://github.com/moby/moby/pull/49682) +- Update containerd to [v1.7.26](https://github.com/containerd/containerd/releases/tag/v1.7.26). [docker/containerd-packaging#409](https://github.com/docker/containerd-packaging/pull/409) + +## 28.0.2 + +{{< release-date date="2025-03-19" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.0.2 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.0.2) +- [moby/moby, 28.0.2 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.0.2) + +### Bug fixes and enhancements + +- Fix CLI-specific attributes (`docker.cli.*`) being unintentionally passed to downstream OTel services. [docker/cli#5842](https://github.com/docker/cli/pull/5842) +- Fix an issue where user-specified `OTEL_RESOURCE_ATTRIBUTES` were being overridden by the CLI's internal telemetry attributes. The CLI now properly merges user-specified attributes with internal ones, allowing both to coexist. [docker/cli#5842](https://github.com/docker/cli/pull/5842) +- Fix the daemon failing to start on Windows when a container created before v28.0.0 was present. [moby/moby#49626](https://github.com/moby/moby/pull/49626) +- Fix a possible error on `docker buildx prune` with `--min-free-space`. [moby/moby#49623](https://github.com/moby/moby/pull/49623) +- Fix a spurious `io: read/write on closed pipe` error in the daemon log when closing a container. [moby/moby#49590](https://github.com/moby/moby/pull/49590) +- Fix the Docker daemon failing too early if the containerd socket isn't immediately available. [moby/moby#49603](https://github.com/moby/moby/pull/49603) +- Mask Linux thermal interrupt info in a container's `/proc` and `/sys` by default. [moby/moby#49560](https://github.com/moby/moby/pull/49560) +- Update `contrib/check-config.sh` to check for more kernel modules related to iptables.
[moby/moby#49622](https://github.com/moby/moby/pull/49622) +- containerd image store: Fix an integer overflow in the handling of user IDs passed via `--user`. [moby/moby#49652](https://github.com/moby/moby/pull/49652) +- containerd image store: Fix a spurious `reference for unknown type: application/vnd.in-toto+json` warning being logged to the daemon's log. [moby/moby#49652](https://github.com/moby/moby/pull/49652) +- containerd image store: Improve performance of `docker ps` when running a large number of containers. [moby/moby#49365](https://github.com/moby/moby/pull/49365) + +### Packaging updates + +- Update BuildKit to [v0.20.1](https://github.com/moby/buildkit/releases/tag/v0.20.1). [moby/moby#49587](https://github.com/moby/moby/pull/49587) +- Update Buildx to [v0.22.0](https://github.com/docker/buildx/releases/tag/v0.22.0). [docker/docker-ce-packaging#1175](https://github.com/docker/docker-ce-packaging/pull/1175) +- Update Compose to [v2.34.0](https://github.com/docker/compose/releases/tag/v2.34.0). [docker/docker-ce-packaging#1172](https://github.com/docker/docker-ce-packaging/pull/1172) +- Update Go runtime to [1.23.7](https://go.dev/doc/devel/release#go1.23.7). [docker/cli#5890](https://github.com/docker/cli/pull/5890), [docker/docker-ce-packaging#1171](https://github.com/docker/docker-ce-packaging/pull/1171), [moby/moby#49580](https://github.com/moby/moby/pull/49580) +- Update RootlessKit to [v2.3.4](https://github.com/rootless-containers/rootlesskit/releases/tag/v2.3.4). [moby/moby#49614](https://github.com/moby/moby/pull/49614) +- Update containerd (static binaries only) to [v1.7.27](https://www.github.com/containerd/containerd/releases/tag/v1.7.27). [moby/moby#49656](https://github.com/moby/moby/pull/49656) + +### Networking + +- Add the environment variable `DOCKER_INSECURE_NO_IPTABLES_RAW=1` to allow Docker to run on systems where the Linux kernel can't provide `CONFIG_IP_NF_RAW` support. When enabled, Docker will not create rules in the iptables `raw` table. Warning: This is not recommended for production environments as it reduces security by allowing other hosts on the local network to route to ports published to host addresses, even when they are published to `127.0.0.1`. This option bypasses some of the security hardening introduced in Docker Engine 28.0.0. A configuration sketch follows the Go SDK notes below. [moby/moby#49621](https://github.com/moby/moby/pull/49621) +- Allow container startup when an endpoint is attached to a macvlan network driver where the parent interface is down. [moby/moby#49630](https://github.com/moby/moby/pull/49630) +- Do not skip DNAT for packets originating in a `gateway_mode=routed` network. [moby/moby#49577](https://github.com/moby/moby/pull/49577) +- Fix a bug causing `docker ps` to inconsistently report dual-stack port mappings. [moby/moby#49657](https://github.com/moby/moby/pull/49657) +- Fix a bug that could cause `docker-proxy` to stop forwarding UDP datagrams to containers. [moby/moby#49649](https://github.com/moby/moby/pull/49649) +- Fix a bug that caused `docker-proxy` to close UDP connections to containers eagerly, which caused the source address to change needlessly. [moby/moby#49649](https://github.com/moby/moby/pull/49649) + +### Go SDK + +- Move various types and consts from `cli-plugins/manager` to a separate package. [docker/cli#5902](https://github.com/docker/cli/pull/5902) +- Update minimum required Go version to go1.23. [moby/moby#49541](https://github.com/moby/moby/pull/49541) +- `cli/command`: Move `PrettyPrint` utility to `cli/command/formatter`. [docker/cli#5916](https://github.com/docker/cli/pull/5916) +- `runconfig/errors`: split `ErrConflictHostNetwork` into `ErrConflictConnectToHostNetwork` and `ErrConflictDisconnectFromHostNetwork`. [moby/moby#49605](https://github.com/moby/moby/pull/49605)
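+
+A minimal sketch of setting this escape hatch in the daemon's environment under systemd, assuming the stock `docker.service` unit; the drop-in file name is arbitrary:
+
+```console
+$ sudo mkdir -p /etc/systemd/system/docker.service.d
+$ printf '[Service]\nEnvironment=DOCKER_INSECURE_NO_IPTABLES_RAW=1\n' \
+    | sudo tee /etc/systemd/system/docker.service.d/no-iptables-raw.conf
+$ sudo systemctl daemon-reload
+$ sudo systemctl restart docker
+```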
+ +### Deprecations + +- Go-SDK: Deprecate the `cli-plugins/manager.ResourceAttributesEnvvar` constant. It was used internally, but holds the `OTEL_RESOURCE_ATTRIBUTES` name, which is part of the OpenTelemetry specification. Users of this constant should define their own. It will be removed in the next release. [docker/cli#5881](https://github.com/docker/cli/pull/5881) +- Go-SDK: Deprecate `opts.PortOpt`, `opts.ConfigOpt`, and `opts.SecretOpt`. These types were moved to the `opts/swarmopts` package. [docker/cli#5907](https://github.com/docker/cli/pull/5907) +- Go-SDK: Remove the `service/logs` package. [docker/cli#5910](https://github.com/docker/cli/pull/5910) +- Go-SDK: `cli/command/image`: Deprecate `PushTrustedReference` and move it to `cli/trust`. [docker/cli#5894](https://github.com/docker/cli/pull/5894) +- Go-SDK: `cli/command/image`: Deprecate and internalize `TrustedPush`. [docker/cli#5894](https://github.com/docker/cli/pull/5894) +- Go-SDK: `cli/command`: deprecate `Cli.NotaryClient`: use [`trust.GetNotaryRepository`](https://pkg.go.dev/github.com/docker/cli@v28.0.1+incompatible/cli/trust#GetNotaryRepository) instead. This method is no longer used and will be removed in the next release. [docker/cli#5885](https://github.com/docker/cli/pull/5885) +- Go-SDK: `cli/command`: deprecate `Cli.RegistryClient`. This method was only used internally and will be removed in the next release. Use [`client.NewRegistryClient`](https://pkg.go.dev/github.com/docker/cli@v28.0.1+incompatible/cli/registry/client#NewRegistryClient) instead. [docker/cli#5889](https://github.com/docker/cli/pull/5889) +- Go-SDK: `registry`: Deprecate `RepositoryInfo.Official` field. [moby/moby#49567](https://github.com/moby/moby/pull/49567) +- Go-SDK: `registry`: deprecate `HostCertsDir`: this function was only used internally and will be removed in the next release. [moby/moby#49612](https://github.com/moby/moby/pull/49612) +- Go-SDK: `registry`: deprecate `SetCertsDir`: the cert-directory is now automatically selected when running with RootlessKit, and should no longer be set manually. [moby/moby#49612](https://github.com/moby/moby/pull/49612) + +## 28.0.1 + +{{< release-date date="2025-02-26" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.0.1 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.0.1) +- [moby/moby, 28.0.1 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.0.1) + +### Networking + +- Remove dependency on kernel modules `ip_set`, `ip_set_hash_net` and `netfilter_xt_set`. + * The dependency was introduced in release 28.0.0 but proved too disruptive. The iptables rules using these modules have been replaced. [moby/moby#49530](https://github.com/moby/moby/pull/49530) +- Allow daemon startup on a host with IPv6 disabled without requiring `--ip6tables=false`. [moby/moby#49525](https://github.com/moby/moby/pull/49525) +- Fix a bug that was causing containers with `--restart=always` and a published port already in use to restart in a tight loop. [moby/moby#49507](https://github.com/moby/moby/pull/49507) +- Fix an issue with Swarm ingress, caused by incorrect ordering of iptables rules.
[moby/moby#49538](https://github.com/moby/moby/pull/49538) +- Fix creation of a swarm-scoped network from a `--config-only` network. [moby/moby#49521](https://github.com/moby/moby/pull/49521) +- Fix `docker network inspect` reporting an IPv6 gateway with CIDR suffix for a newly created network with no specific IPAM config, until a daemon restart. [moby/moby#49520](https://github.com/moby/moby/pull/49520) +- Improve the error reported when kernel modules `ip_set`, `ip_set_hash_net` and `netfilter_xt_set` are not available. [moby/moby#49524](https://github.com/moby/moby/pull/49524) +- Move most of Docker's iptables rules out of the filter-FORWARD chain, so that other applications are free to append rules that must follow Docker's rules. [moby/moby#49518](https://github.com/moby/moby/pull/49518) +- Update `--help` output and man page to state which options only apply to the default bridge network. [moby/moby#49522](https://github.com/moby/moby/pull/49522) + + +### Bug fixes and enhancements + +- Fix `docker context create` always returning an error when using the `"skip-tls-verify"` option. See the example after the packaging list below. [docker/cli#5850](https://github.com/docker/cli/pull/5850) +- Fix shell completion suggesting IDs instead of names for services and nodes. [docker/cli#5848](https://github.com/docker/cli/pull/5848) +- Fix unintentionally printing exit status to standard error output when `docker exec/run` returns a non-zero status. [docker/cli#5854](https://github.com/docker/cli/pull/5854) +- Fix a regression causing `protocol "tcp" is not supported by the RootlessKit port driver "slirp4netns"` errors. [moby/moby#49514](https://github.com/moby/moby/pull/49514) +- containerd image store: Fix `docker inspect` not being able to show multi-platform images with missing layers for all platforms. [moby/moby#49533](https://github.com/moby/moby/pull/49533) +- containerd image store: Fix `docker images --tree` reporting wrong content size. [moby/moby#49535](https://github.com/moby/moby/pull/49535) +- Fix compilation on i386. [moby/moby#49526](https://github.com/moby/moby/pull/49526) + +### Packaging updates + +- Update `github.com/go-jose/go-jose/v4` to v4.0.5 to address [GHSA-c6gw-w398-hv78](https://github.com/go-jose/go-jose/security/advisories/GHSA-c6gw-w398-hv78) / [CVE-2025-27144](https://www.cve.org/CVERecord?id=CVE-2025-27144). [docker/cli#5867](https://github.com/docker/cli/pull/5867) +- Update Buildx to [v0.21.1](https://github.com/docker/buildx/releases/tag/v0.21.1). [docker/docker-ce-packaging#1167](https://github.com/docker/docker-ce-packaging/pull/1167) +- Update Compose to [v2.33.1](https://github.com/docker/compose/releases/tag/v2.33.1). [docker/docker-ce-packaging#1168](https://github.com/docker/docker-ce-packaging/pull/1168)
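+
+As an illustration of the `skip-tls-verify` fix above, a minimal sketch; the context name `remote` and host `myserver` are placeholders:
+
+```console
+$ docker context create remote \
+    --docker "host=tcp://myserver:2376,skip-tls-verify=true"
+```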
+ +### API + +- containerd image store: Fix `GET /images/json?manifests=1` not filling `Manifests` for index-only images. [moby/moby#49533](https://github.com/moby/moby/pull/49533) +- containerd image store: Fix the `Size.Content` field of `GET /images/json` and `GET /images/{name}/json` including the size of content that's not available locally. [moby/moby#49535](https://github.com/moby/moby/pull/49535) + + +## 28.0.0 + +{{< release-date date="2025-02-19" >}} + +For a full list of pull requests and changes in this release, refer to the relevant GitHub milestones: + +- [docker/cli, 28.0.0 milestone](https://github.com/docker/cli/issues?q=is%3Aclosed+milestone%3A28.0.0) +- [moby/moby, 28.0.0 milestone](https://github.com/moby/moby/issues?q=is%3Aclosed+milestone%3A28.0.0) +- Deprecated and removed features, see [Deprecated Features](https://github.com/docker/cli/blob/v28.0.0/docs/deprecated.md). +- Changes to the Engine API, see [API version history](https://github.com/moby/moby/blob/v28.0.0/docs/api/version-history.md). + +### New + +- Add ability to mount an image inside a container via `--mount type=image`. [moby/moby#48798](https://github.com/moby/moby/pull/48798) + * You can also specify the `--mount type=image,image-subpath=[subpath],...` option to mount a specific path from the image. [docker/cli#5755](https://github.com/docker/cli/pull/5755) +- `docker images --tree` now shows metadata badges. [docker/cli#5744](https://github.com/docker/cli/pull/5744) +- `docker load`, `docker save`, and `docker history` now support a `--platform` flag allowing you to choose a specific platform for single-platform operations on multi-platform images. See the example after the 28.0.0 API list. [docker/cli#5331](https://github.com/docker/cli/pull/5331) +- Add `OOMScoreAdj` to `docker service create` and `docker stack`. [docker/cli#5145](https://github.com/docker/cli/pull/5145) +- `docker buildx prune` now supports `reserved-space`, `max-used-space`, `min-free-space`, and `keep-bytes` filters. [moby/moby#48720](https://github.com/moby/moby/pull/48720) +- Windows: Add support for running containerd as a child process of the daemon, instead of using a system-installed containerd. [moby/moby#47955](https://github.com/moby/moby/pull/47955) + + +### Networking + +- The `docker-proxy` binary has been updated. Older versions will not work with the updated `dockerd`. [moby/moby#48132](https://github.com/moby/moby/pull/48132) + - Close a window in which the userland proxy (`docker-proxy`) could accept TCP connections that would then fail after `iptables` NAT rules were set up. + - The executable `rootlesskit-docker-proxy` is no longer used and has been removed from the build and distribution. +- DNS nameservers read from the host's `/etc/resolv.conf` are now always accessed from the host's network namespace. [moby/moby#48290](https://github.com/moby/moby/pull/48290) + - When the host's `/etc/resolv.conf` contains no nameservers and there are no `--dns` overrides, Google's DNS servers are no longer used, apart from by the default bridge network and in build containers. +- Container interfaces in bridge and macvlan networks now use randomly generated MAC addresses. [moby/moby#48808](https://github.com/moby/moby/pull/48808) + - Gratuitous ARP and Neighbor Advertisement messages will be sent when the interfaces are started so that, when IP addresses are reused, they're associated with the newly generated MAC address.
+ - IPv6 addresses in the default bridge network are now IPAM-assigned, rather than being derived from the MAC address. +- The deprecated OCI `prestart` hook is now only used by build containers. For other containers, network interfaces are added to the network namespace after task creation is complete, before the container task is started. [moby/moby#47406](https://github.com/moby/moby/pull/47406) +- Add a new `gw-priority` option to `docker run`, `docker container create`, and `docker network connect`. The Engine uses this option to determine which network provides the default gateway for a container. On `docker run`, this option is only available through the extended `--network` syntax. [docker/cli#5664](https://github.com/docker/cli/pull/5664) +- Add a new netlabel `com.docker.network.endpoint.ifname` to customize the interface name used when connecting a container to a network. It's supported by all built-in network drivers on Linux. [moby/moby#49155](https://github.com/moby/moby/pull/49155) + - When a container is created with multiple networks specified, there's no guarantee of the order in which networks are connected to the container. So, if a custom interface name uses the same prefix as the auto-generated names, for example `eth`, the container might fail to start. + - The recommended practice is to use a different prefix, for example `en0`, or a numerical suffix high enough to never collide, for example `eth100`. + - This label can be specified on `docker network connect` via the `--driver-opt` flag, for example `docker network connect --driver-opt=com.docker.network.endpoint.ifname=foobar …`. + - Or via the long-form `--network` flag on `docker run`, for example `docker run --network=name=bridge,driver-opt=com.docker.network.endpoint.ifname=foobar …`. +- If a custom network driver reports capability `GwAllocChecker` then, before a network is created, it will get a `GwAllocCheckerRequest` with the network's options. The custom driver can then reply that no gateway IP address should be allocated. [moby/moby#49372](https://github.com/moby/moby/pull/49372) + +#### Port publishing in bridge networks + +- `dockerd` now requires `ipset` support in the Linux kernel. [moby/moby#48596](https://github.com/moby/moby/pull/48596) + - The `iptables` and `ip6tables` rules used to implement port publishing and network isolation have been extensively modified. This enables some of the following functional changes, and is a first step in refactoring to enable native `nftables` support in a future release. [moby/moby#48815](https://github.com/moby/moby/issues/48815) + - If it becomes necessary to downgrade to an earlier version of the daemon, some manual cleanup of the new rules will be necessary. The simplest and surest approach is to reboot the host, or use `iptables -F` and `ip6tables -F` to flush all existing `iptables` rules from the `filter` table before starting the older version of the daemon.
When that is not possible, run the following commands as root: + - `iptables -D FORWARD -m set --match-set docker-ext-bridges-v4 dst -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT; ip6tables -D FORWARD -m set --match-set docker-ext-bridges-v6 dst -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT` + - `iptables -D FORWARD -m set --match-set docker-ext-bridges-v4 dst -j DOCKER; ip6tables -D FORWARD -m set --match-set docker-ext-bridges-v6 dst -j DOCKER` + - If you were previously running with the iptables filter-FORWARD policy set to `ACCEPT` and need to restore access to unpublished ports, also delete per-bridge-network rules from the `DOCKER` chains. For example, `iptables -D DOCKER ! -i docker0 -o docker0 -j DROP`. +- Fix a security issue that was allowing remote hosts to connect directly to a container on its published ports. [moby/moby#49325](https://github.com/moby/moby/pull/49325) +- Fix a security issue that was allowing neighbor hosts to connect to ports mapped on a loopback address. [moby/moby#49325](https://github.com/moby/moby/pull/49325) +- Fix an issue that prevented port publishing to link-local addresses. [moby/moby#48570](https://github.com/moby/moby/pull/48570) +- UDP ports published by a container are now reliably accessible by containers on other networks, via the host's public IP address. [moby/moby#48571](https://github.com/moby/moby/pull/48571) +- Docker will now only set the `ip6tables` policy for the `FORWARD` chain in the `filter` table to `DROP` if it enables IP forwarding on the host itself (sysctls `net.ipv6.conf.all.forwarding` and `net.ipv6.conf.default.forwarding`). This is now aligned with existing IPv4 behavior. [moby/moby#48594](https://github.com/moby/moby/pull/48594) + - If IPv6 forwarding is enabled on your host, but you were depending on Docker to set the ip6tables filter-FORWARD policy to `DROP`, you might need to update your host's configuration to make sure it is secure. +- Direct routed access to container ports that are not exposed using `-p`/`--publish` is now blocked in the `DOCKER` iptables chain. [moby/moby#48724](https://github.com/moby/moby/pull/48724) + - If the default iptables filter-FORWARD policy was previously left at `ACCEPT` on your host, and direct routed access to a container's unpublished ports from a remote host is still required, options are: + - Publish the ports you need. + - Use the new `gateway_mode_ipv[46]=nat-unprotected`, described below. + - Container ports published to host addresses will continue to be accessible via those host addresses, using NAT or the userland proxy. + - Unpublished container ports continue to be directly accessible from the Docker host via the container's IP address. +- Networks created with `gateway_mode_ipv[46]=routed` are now accessible from other bridge networks running on the same Docker host, as well as from outside the host. [moby/moby#48596](https://github.com/moby/moby/pull/48596) +- Bridge driver options `com.docker.network.bridge.gateway_mode_ipv4` and `com.docker.network.bridge.gateway_mode_ipv6` now accept mode `nat-unprotected`. [moby/moby#48597](https://github.com/moby/moby/pull/48597) + - `nat-unprotected` is similar to the default `nat` mode, but no per-port or per-protocol rules are set up. This means any port on a container can be accessed by direct routing from a remote host. +- Bridge driver options `com.docker.network.bridge.gateway_mode_ipv4` and `com.docker.network.bridge.gateway_mode_ipv6` now accept mode `isolated`, when the network is also `internal`.
[moby/moby#49262](https://github.com/moby/moby/pull/49262) + - An address is normally assigned to the bridge device in an `internal` network. So, processes on the Docker host can access the network, and containers in the network can access host services listening on that bridge address (including services listening on "any" host address, `0.0.0.0` or `::`). + - An `internal` bridge network created with gateway mode `isolated` does not have an address on the Docker host. +- When a port mapping includes a host IP address or port number that cannot be used because NAT from the host is disabled using `gateway_mode_ipv[46]=routed`, container creation will no longer fail. The unused fields might be needed if the gateway endpoint changes when networks are connected or disconnected. A message about the unused fields is logged. [moby/moby#48575](https://github.com/moby/moby/pull/48575) +- Do not create iptables nat-POSTROUTING masquerade rules for a container's own published ports, when the userland proxy is enabled. [moby/moby#48854](https://github.com/moby/moby/pull/48854) + +#### IPv6 + +- Add `docker network create` option `--ipv4`. To disable IPv4 address assignment for a network, use `docker network create --ipv4=false [...]`. [docker/cli#5599](https://github.com/docker/cli/pull/5599) +- Daemon option `--ipv6` (`"ipv6": true` in `daemon.json`) can now be used without `fixed-cidr-v6`. [moby/moby#48319](https://github.com/moby/moby/pull/48319) +- IPAM now handles subnets larger than `/64`. [moby/moby#49223](https://github.com/moby/moby/pull/49223) +- Duplicate address detection (DAD) is now disabled for addresses assigned to the bridges belonging to bridge networks. [moby/moby#48609](https://github.com/moby/moby/pull/48609) +- Modifications to `host-gateway`, for compatibility with IPv6-only networks. [moby/moby#48807](https://github.com/moby/moby/pull/48807) + - When the special value `host-gateway` is used in an `--add-host` option in place of an address, it's replaced by an address on the Docker host to make it possible to refer to the host by name. The address used belongs to the default bridge (normally `docker0`). Until now it's always been an IPv4 address, because all containers on bridge networks had IPv4 addresses. + - Now, if IPv6 is enabled on the default bridge network, `/etc/hosts` entries will be created for IPv4 and IPv6 addresses. So, a container that's only connected to IPv6-only networks can access the host by name. + - The `--host-gateway-ip` option overrides the address used to replace `host-gateway`. Two of these options are now allowed on the command line, for one IPv4 gateway and one IPv6. + - In the `daemon.json` file, to provide two addresses, use `"host-gateway-ips"`. For example, `"host-gateway-ips": ["192.0.2.1", "2001:db8::1111"]`. + + +### Bug fixes and enhancements + +- Add the IPv6 loopback address as an insecure registry by default. [moby/moby#48540](https://github.com/moby/moby/pull/48540) +- Add support for Cobra-generated completion scripts for `dockerd`. [moby/moby#49339](https://github.com/moby/moby/pull/49339) +- Fix DNS queries failing when containers are launched via `systemd` auto-start on boot. [moby/moby#48812](https://github.com/moby/moby/pull/48812) +- Fix Docker Swarm mode ignoring `volume.subpath`. [docker/cli#5833](https://github.com/docker/cli/pull/5833) +- Fix `docker export` continuing the export after the operation is canceled.
[moby/moby#49265](https://github.com/moby/moby/pull/49265) +- Fix `docker export` not releasing the container's writable layer after a failure. [moby/moby#48517](https://github.com/moby/moby/pull/48517) +- Fix `docker images --tree` unnecessarily truncating long image names when multiple names are available. [docker/cli#5757](https://github.com/docker/cli/pull/5757) +- Fix a bug where a container with a name matching another container's ID is not restored on daemon startup. [moby/moby#48669](https://github.com/moby/moby/pull/48669) +- Fix an issue preventing some IPv6 addresses shown by `docker ps` from being properly bracketed. [docker/cli#5468](https://github.com/docker/cli/pull/5468) +- Fix a bug preventing image pulls from being canceled during `docker run`. [docker/cli#5645](https://github.com/docker/cli/pull/5645) +- Fix error handling when running the daemon as a Windows service to prevent unclean exits. [moby/moby#48518](https://github.com/moby/moby/pull/48518) +- Fix an issue causing the output of `docker run` to be inconsistent when using `--attach stdout` or `--attach stderr` versus `stdin`. `docker run --attach stdin` now exits if the container exits. [docker/cli#5662](https://github.com/docker/cli/pull/5662) +- Fix rootless Docker setup with `subid` backed by NSS modules. [moby/moby#49036](https://github.com/moby/moby/pull/49036) +- Completion scripts generated by the CLI now show descriptions next to each command and flag suggestion. [docker/cli#5756](https://github.com/docker/cli/pull/5756) +- IPv6 addresses shown by `docker ps` in port bindings are now bracketed. [docker/cli#5363](https://github.com/docker/cli/pull/5363) +- Implement the ports validation method for Compose. [docker/cli#5524](https://github.com/docker/cli/pull/5524) +- Improve error output for invalid flags on the command line. [docker/cli#5233](https://github.com/docker/cli/pull/5233) +- Improve errors when failing to start a container using another container's network namespace. [moby/moby#49367](https://github.com/moby/moby/pull/49367) +- Improve handling of invalid API errors that could result in an empty error message being shown. [moby/moby#49373](https://github.com/moby/moby/pull/49373) +- Improve output and consistency for unknown (sub)commands and invalid arguments. [docker/cli#5234](https://github.com/docker/cli/pull/5234) +- Improve validation of `exec-opts` in daemon configuration. [moby/moby#48979](https://github.com/moby/moby/pull/48979) +- Update the handling of the `--gpus=0` flag to be consistent with the NVIDIA Container Runtime. [moby/moby#48482](https://github.com/moby/moby/pull/48482) +- `client.ContainerCreate` now normalizes `CapAdd` and `CapDrop` fields in `HostConfig` to their canonical form. [moby/moby#48551](https://github.com/moby/moby/pull/48551) +- `docker image save` now produces stable timestamps. [moby/moby#48611](https://github.com/moby/moby/pull/48611) +- `docker inspect` now lets you inspect Swarm configs. [docker/cli#5573](https://github.com/docker/cli/pull/5573) +- containerd image store: Add support for `Extracting` layer status in `docker pull`. [moby/moby#49064](https://github.com/moby/moby/pull/49064) +- containerd image store: Fix `commit`, `import`, and `build` not preserving a replaced image as a dangling image. [moby/moby#48316](https://github.com/moby/moby/pull/48316) +- containerd image store: Make `docker load --platform` return an error when the requested platform isn't loaded. [moby/moby#48718](https://github.com/moby/moby/pull/48718) +- Fix validation of the `--link` option.
+- Add validation of the `network-diagnostic-port` daemon configuration option. [moby/moby#49305](https://github.com/moby/moby/pull/49305)
+- Unless explicitly configured, an IP address is no longer reserved for a gateway in cases where it is not required. Namely, `internal` bridge networks with option `com.docker.network.bridge.inhibit_ipv4`, `ipvlan` or `macvlan` networks with no parent interface, and L3 IPvlan modes. [moby/moby#49261](https://github.com/moby/moby/pull/49261)
+- If a custom network driver reports the `GwAllocChecker` capability, it receives a `GwAllocCheckerRequest` with the network's options before a network is created. The custom driver can then reply that no gateway IP address should be allocated. [moby/moby#49372](https://github.com/moby/moby/pull/49372)
+- Fix an issue that meant a container could not be attached to an L3 IPvlan at the same time as other network types. [moby/moby#49130](https://github.com/moby/moby/pull/49130)
+- Remove the correct `/etc/hosts` entries when disconnecting a container from a network. [moby/moby#48857](https://github.com/moby/moby/pull/48857)
+- Fix duplicate network disconnect events. [moby/moby#48800](https://github.com/moby/moby/pull/48800)
+- Resolve issues related to changing `fixed-cidr` for `docker0`, and inferring configuration from a user-managed default bridge (`--bridge`). [moby/moby#48319](https://github.com/moby/moby/pull/48319)
+- Remove feature flag `windows-dns-proxy`, introduced in release 26.1.0 to control forwarding to external DNS resolvers from Windows containers, to make `nslookup` work. It was enabled by default in release 27.0.0. [moby/moby#48738](https://github.com/moby/moby/pull/48738)
+- Remove an `iptables` mangle rule for checksumming SCTP. The rule can be re-enabled by setting `DOCKER_IPTABLES_SCTP_CHECKSUM=1` in the daemon's environment. This override will be removed in a future release. [moby/moby#48149](https://github.com/moby/moby/pull/48149)
+- Faster connection to bridge networks, in most cases. [moby/moby#49302](https://github.com/moby/moby/pull/49302)
+
+
+### Packaging updates
+
+- Update Go runtime to [1.23.6](https://go.dev/doc/devel/release#go1.23.6). [docker/cli#5795](https://github.com/docker/cli/pull/5795), [moby/moby#49393](https://github.com/moby/moby/pull/49393), [docker/docker-ce-packaging#1161](https://github.com/docker/docker-ce-packaging/pull/1161)
+- Update `runc` to [v1.2.5](https://github.com/opencontainers/runc/releases/tag/v1.2.5) (static binaries only). [moby/moby#49464](https://github.com/moby/moby/pull/49464)
+- Update containerd to [v1.7.25](https://github.com/containerd/containerd/releases/tag/v1.7.25). [moby/moby#49252](https://github.com/moby/moby/pull/49252)
+- Update BuildKit to [v0.20.0](https://github.com/moby/buildkit/releases/tag/v0.20.0). [moby/moby#49495](https://github.com/moby/moby/pull/49495)
+- Update Buildx to [v0.21.0](https://github.com/docker/buildx/releases/tag/v0.21.0). [docker/docker-ce-packaging#1166](https://github.com/docker/docker-ce-packaging/pull/1166)
+- Update Compose to [v2.32.4](https://github.com/docker/compose/releases/tag/v2.32.4). [docker/docker-ce-packaging#1143](https://github.com/docker/docker-ce-packaging/pull/1143)
+- The canonical source for the `dockerd(8)` man page has been moved back to the `moby/moby` repository itself. [moby/moby#48298](https://github.com/moby/moby/pull/48298)
+
+### Go SDK
+
+- Improve validation of empty object IDs.
The client now returns an "Invalid Parameter" error when trying to use an empty ID or name. This changes the error returned by some "Inspect" functions from a "Not found" error to an "Invalid Parameter" error. [moby/moby#49381](https://github.com/moby/moby/pull/49381)
+- `Client.ImageBuild()` now omits default values from the API request's query string. [moby/moby#48651](https://github.com/moby/moby/pull/48651)
+- `api/types/container`: Merge `Stats` and `StatsResponse`. [moby/moby#49287](https://github.com/moby/moby/pull/49287)
+- `client.WithVersion`: Strip v-prefix when setting API version. [moby/moby#49352](https://github.com/moby/moby/pull/49352)
+- `client`: Add `HijackDialer` interface. [moby/moby#49388](https://github.com/moby/moby/pull/49388)
+- `client`: Add `SwarmManagementAPIClient` interface to describe all API client methods related to Swarm-specific objects. [moby/moby#49388](https://github.com/moby/moby/pull/49388)
+- `client`: Add `WithTraceOptions` allowing to specify custom OTel trace options. [moby/moby#49415](https://github.com/moby/moby/pull/49415)
+- `client`: `ImageHistory`, `ImageLoad` and `ImageSave` now use variadic functional options. [moby/moby#49466](https://github.com/moby/moby/pull/49466)
+- `pkg/containerfs`: Move to internal. [moby/moby#48097](https://github.com/moby/moby/pull/48097)
+- `pkg/reexec`: Can now be used on platforms other than Linux, Windows, macOS, and FreeBSD. [moby/moby#49118](https://github.com/moby/moby/pull/49118)
+- `api/types/container`: Introduce `CommitResponse` type. This is currently an alias for `IDResponse`, but might become a distinct type in a future release. [moby/moby#49444](https://github.com/moby/moby/pull/49444)
+- `api/types/container`: Introduce `ExecCreateResponse` type. This is currently an alias for `IDResponse`, but might become a distinct type in a future release. [moby/moby#49444](https://github.com/moby/moby/pull/49444)
+
+### API
+
+- Update API version to [v1.48](https://docs.docker.com/engine/api/v1.48/). [moby/moby#48476](https://github.com/moby/moby/pull/48476)
+- `GET /images/{name}/json` response now returns the `Manifests` field containing information about the sub-manifests contained in the image index. This includes things like platform-specific manifests and build attestations. [moby/moby#48264](https://github.com/moby/moby/pull/48264)
+- `POST /containers/create` now supports `Mount` of type `image` for mounting an image inside a container. [moby/moby#48798](https://github.com/moby/moby/pull/48798)
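+
+  A minimal sketch of the equivalent CLI usage (assuming the CLI exposes this mount type through the existing `--mount` flag with the same `type=image` syntax; the image and target path are illustrative):
+
+  ```console
+  $ docker run --rm --mount type=image,source=alpine:latest,target=/image busybox ls /image
+  ```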
+- `GET /images/{name}/history` now supports a `platform` parameter (JSON encoded OCI Platform type) that lets you specify a platform to show the history of. [moby/moby#48295](https://github.com/moby/moby/pull/48295)
+- `POST /images/{name}/load` and `GET /images/{name}/get` now support a `platform` parameter (JSON encoded OCI Platform type) that lets you specify a platform to load or save. Not passing this parameter results in loading or saving the full multi-platform image. [moby/moby#48295](https://github.com/moby/moby/pull/48295)
+- Improve errors for invalid width and height on container resize and exec resize. [moby/moby#48679](https://github.com/moby/moby/pull/48679)
+- The `POST /containers/create` endpoint now includes a warning in the response when setting the container-wide `VolumeDriver` option in combination with volumes defined through `Mounts`, because the `VolumeDriver` option has no effect on those volumes. This warning was previously generated by the CLI. [moby/moby#48789](https://github.com/moby/moby/pull/48789)
+- containerd image store: `GET /images/json` and `GET /images/{name}/json` responses now include a `Descriptor` field, which contains an OCI descriptor of the image target. The new field is only populated if the daemon provides a multi-platform image store. [moby/moby#48894](https://github.com/moby/moby/pull/48894)
+- containerd image store: `GET /containers/{name}/json` now returns an `ImageManifestDescriptor` field containing the OCI descriptor of the platform-specific image manifest of the image that was used to create the container. [moby/moby#48855](https://github.com/moby/moby/pull/48855)
+- Debug endpoints (`GET /debug/vars`, `GET /debug/pprof/`, `GET /debug/pprof/cmdline`, `GET /debug/pprof/profile`, `GET /debug/pprof/symbol`, `GET /debug/pprof/trace`, `GET /debug/pprof/{name}`) are now also accessible through the versioned-API paths (`/v/`). [moby/moby#49051](https://github.com/moby/moby/pull/49051)
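+
+  As a quick check, one of these endpoints can be queried through a versioned path. A sketch, assuming the daemon runs with debugging enabled and listens on the default Unix socket:
+
+  ```console
+  $ curl --unix-socket /var/run/docker.sock http://localhost/v1.48/debug/vars
+  ```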
+- Fix API returning a `500` status code instead of `400` for validation errors. [moby/moby#49217](https://github.com/moby/moby/pull/49217)
+- Fix status codes for the archive endpoints `HEAD /containers/{name:.*}/archive`, `GET /containers/{name:.*}/archive`, and `PUT /containers/{name:.*}/archive` returning a `500` status instead of a `400` status. [moby/moby#49219](https://github.com/moby/moby/pull/49219)
+- `POST /containers/create` now accepts a `writable-cgroups=true` option in `HostConfig.SecurityOpt` to mount the container's cgroups writable. This provides a more granular approach than `HostConfig.Privileged`. [moby/moby#48828](https://github.com/moby/moby/pull/48828)
+- `POST /build/prune` renames `keep-bytes` to `reserved-space` and now supports additional prune parameters `max-used-space` and `min-free-space`. [moby/moby#48720](https://github.com/moby/moby/pull/48720)
+- `POST /networks/create` now has an `EnableIPv4` field. Setting it to `false` disables IPv4 IPAM for the network. [moby/moby#48271](https://github.com/moby/moby/pull/48271)
+  - `GET /networks/{id}` now returns an `EnableIPv4` field showing whether the network has IPv4 IPAM enabled. [moby/moby#48271](https://github.com/moby/moby/pull/48271)
+  - User-defined bridge networks require either IPv4 or IPv6 address assignment to be enabled. IPv4 cannot be disabled for the default bridge network (`docker0`). [moby/moby#48323](https://github.com/moby/moby/pull/48323)
+  - `macvlan` and `ipvlan` networks can be created with address assignment disabled for IPv4, IPv6, or both address families. [moby/moby#48299](https://github.com/moby/moby/pull/48299)
+  - IPv4 cannot be disabled for Windows or Swarm networks. [moby/moby#48278](https://github.com/moby/moby/pull/48278)
+- Add a way to specify which network should provide the default gateway for a container. [moby/moby#48936](https://github.com/moby/moby/pull/48936)
+  - `POST /networks/{id}/connect` and `POST /containers/create` now accept a `GwPriority` field in `EndpointsConfig`. This value is used to determine which network endpoint provides the default gateway for the container. The endpoint with the highest priority is selected. If multiple endpoints have the same priority, endpoints are sorted lexicographically by their network name, and the one that sorts first is picked. [moby/moby#48746](https://github.com/moby/moby/pull/48746)
+  - `GET /containers/json` now returns a `GwPriority` field in `NetworkSettings` for each network endpoint. The `GwPriority` field is used by the CLI's new `gw-priority` option for `docker run` and `docker network connect`. [moby/moby#48746](https://github.com/moby/moby/pull/48746)
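+
+    For example, a minimal sketch using the CLI option mentioned above (network and container names are illustrative):
+
+    ```console
+    $ docker network create net-a
+    $ docker network create net-b
+    $ docker run -d --name web --network net-a nginx:latest
+    $ docker network connect --gw-priority 100 net-b web
+    ```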
+- Settings for `eth0` in `--sysctl` options are no longer automatically migrated to the network endpoint. [moby/moby#48746](https://github.com/moby/moby/pull/48746)
+  - For example, in the Docker CLI, `docker run --network mynet --sysctl net.ipv4.conf.eth0.log_martians=1 ...` is rejected. Instead, you must use `docker run --network name=mynet,driver-opt=com.docker.network.endpoint.sysctls=net.ipv4.conf.IFNAME.log_martians=1 ...`.
+- `GET /containers/json` now returns an `ImageManifestDescriptor` field matching the same field in `/containers/{name}/json`. This field is only populated if the daemon provides a multi-platform image store. [moby/moby#49407](https://github.com/moby/moby/pull/49407)
+
+
+### Removed
+
+- The Fluent logger option `fluentd-async-connect` was deprecated in v20.10 and is now removed. [moby/moby#46114](https://github.com/moby/moby/pull/46114)
+- The `--time` option on `docker stop` and `docker restart` is deprecated and renamed to `--timeout`. [docker/cli#5485](https://github.com/docker/cli/pull/5485)
+- Go-SDK: `pkg/ioutils`: Remove `NewReaderErrWrapper`, as it was never used. [moby/moby#49258](https://github.com/moby/moby/pull/49258)
+- Go-SDK: `pkg/ioutils`: Remove deprecated `BytesPipe`, `NewBytesPipe`, `ErrClosed`, `WriteCounter`, `NewWriteCounter`, `NewReaderErrWrapper`, `NopFlusher`. [moby/moby#49245](https://github.com/moby/moby/pull/49245)
+- Go-SDK: `pkg/ioutils`: Remove deprecated `NopWriter` and `NopWriteCloser`. [moby/moby#49256](https://github.com/moby/moby/pull/49256)
+- Go-SDK: `pkg/sysinfo`: Remove deprecated `NumCPU`. [moby/moby#49242](https://github.com/moby/moby/pull/49242)
+- Go-SDK: Remove `pkg/broadcaster`, as it was only used internally. [moby/moby#49172](https://github.com/moby/moby/pull/49172)
+- Go-SDK: Remove deprecated `cli.Errors` type. [docker/cli#5549](https://github.com/docker/cli/pull/5549)
+- Remove `pkg/ioutils.ReadCloserWrapper`, as it was only used in tests. [moby/moby#49237](https://github.com/moby/moby/pull/49237)
+- Remove deprecated `api-cors-header` config parameter and the `dockerd` `--api-cors-header` option. [moby/moby#48209](https://github.com/moby/moby/pull/48209)
+- Remove deprecated `APIEndpoint.Version` field, `APIVersion` type, and `APIVersion1` and `APIVersion2` consts. [moby/moby#49004](https://github.com/moby/moby/pull/49004)
+- Remove deprecated `api-cors-header` config parameter and the Docker daemon's `--api-cors-header` option. [docker/cli#5437](https://github.com/docker/cli/pull/5437)
+- Remove deprecated `pkg/directory` package. [moby/moby#48779](https://github.com/moby/moby/pull/48779)
+- Remove deprecated `pkg/dmsg.Dmesg()`. [moby/moby#48109](https://github.com/moby/moby/pull/48109)
+- Remove deprecated image/spec package, which was moved to a separate module (`github.com/moby/docker-image-spec`). [moby/moby#48460](https://github.com/moby/moby/pull/48460)
+- Remove migration code and errors for the deprecated `logentries` logging driver. [moby/moby#48891](https://github.com/moby/moby/pull/48891)
+- Remove support for deprecated external graph-driver plugins. [moby/moby#48072](https://github.com/moby/moby/pull/48072)
+- `api/types`: Remove deprecated `container.ContainerNode` and `ContainerJSONBase.Node` field. [moby/moby#48107](https://github.com/moby/moby/pull/48107)
+- `api/types`: Remove deprecated aliases: `ImagesPruneReport`, `VolumesPruneReport`, `NetworkCreateRequest`, `NetworkCreate`, `NetworkListOptions`, `NetworkCreateResponse`, `NetworkInspectOptions`, `NetworkConnect`, `NetworkDisconnect`, `EndpointResource`, `NetworkResource`, `NetworksPruneReport`, `ExecConfig`, `ExecStartCheck`, `ContainerExecInspect`, `ContainersPruneReport`, `ContainerPathStat`, `CopyToContainerOptions`, `ContainerStats`, `ImageSearchOptions`, `ImageImportSource`, `ImageLoadResponse`, `ContainerNode`. [moby/moby#48107](https://github.com/moby/moby/pull/48107)
+- `libnetwork/iptables`: Remove deprecated `IPV`, `Iptables`, `IP6Tables` and `Passthrough()`. [moby/moby#49121](https://github.com/moby/moby/pull/49121)
+- `pkg/archive`: Remove deprecated `CanonicalTarNameForPath`, `NewTempArchive`, `TempArchive`. [moby/moby#48708](https://github.com/moby/moby/pull/48708)
+- `pkg/fileutils`: Remove deprecated `GetTotalUsedFds`. [moby/moby#49210](https://github.com/moby/moby/pull/49210)
+- `pkg/ioutils`: Remove `OnEOFReader`, which was only used internally. [moby/moby#49170](https://github.com/moby/moby/pull/49170)
+- `pkg/longpath`: Remove deprecated `Prefix` constant. [moby/moby#48779](https://github.com/moby/moby/pull/48779)
+- `pkg/stringid`: Remove deprecated `IsShortID` and `ValidateID` functions. [moby/moby#48705](https://github.com/moby/moby/pull/48705)
+- `runconfig/opts`: Remove deprecated `ConvertKVStringsToMap`. [moby/moby#48102](https://github.com/moby/moby/pull/48102)
+- `runconfig`: Remove deprecated `ContainerConfigWrapper`, `SetDefaultNetModeIfBlank`, `DefaultDaemonNetworkMode`, `IsPreDefinedNetwork`. [moby/moby#48102](https://github.com/moby/moby/pull/48102)
+- `container`: Remove deprecated `ErrNameReserved`, `ErrNameNotReserved`. [moby/moby#48728](https://github.com/moby/moby/pull/48728)
+- Remove `Daemon.ContainerInspectCurrent()` method and change the `Daemon.ContainerInspect()` signature to accept a `backend.ContainerInspectOptions` struct. [moby/moby#48672](https://github.com/moby/moby/pull/48672)
+- Remove deprecated `Daemon.Exists()` and `Daemon.IsPaused()` methods. [moby/moby#48723](https://github.com/moby/moby/pull/48723)
+
+### Deprecations
+
+- API: The `BridgeNfIptables` and `BridgeNfIp6tables` fields in the `GET /info` response are now always `false` and will be omitted in API v1.49. The netfilter module is now loaded on-demand, and no longer during daemon startup, making these fields obsolete.
[moby/moby#49114](https://github.com/moby/moby/pull/49114)
+- API: The `error` and `progress` fields in streaming responses for endpoints that return a JSON progress response, such as `POST /images/create`, `POST /images/{name}/push`, and `POST /build`, are deprecated. [moby/moby#49447](https://github.com/moby/moby/pull/49447)
+  - Users should use the information in the `errorDetail` and `progressDetail` fields instead.
+  - These fields were marked deprecated in API v1.4 (docker v0.6.0) and API v1.8 (docker v0.7.1) respectively, but still returned.
+  - These fields will be left empty or will be omitted in a future API version.
+- Deprecate `Daemon.Register()`. This function is unused and will be removed in the next release. [moby/moby#48702](https://github.com/moby/moby/pull/48702)
+- Deprecate the `client.ImageInspectWithRaw` function in favor of the new `client.ImageInspect`. [moby/moby#48264](https://github.com/moby/moby/pull/48264)
+- Deprecate `daemon/config.Config.ValidatePlatformConfig()`. This method was used as a helper for `config.Validate`, which should be used instead. [moby/moby#48985](https://github.com/moby/moby/pull/48985)
+- Deprecate `pkg/reexec`. This package is deprecated and moved to a separate module. Use `github.com/moby/sys/reexec` instead. [moby/moby#49129](https://github.com/moby/moby/pull/49129)
+- Deprecate configuration for pushing non-distributable artifacts. [docker/cli#5724](https://github.com/docker/cli/pull/5724)
+- Deprecate the `--allow-nondistributable-artifacts` daemon flag and corresponding `allow-nondistributable-artifacts` field in `daemon.json`. Setting either option no longer takes effect, but a deprecation warning is logged. [moby/moby#49065](https://github.com/moby/moby/pull/49065)
+- Deprecate the `RegistryConfig.AllowNondistributableArtifactsCIDRs` and `RegistryConfig.AllowNondistributableArtifactsHostnames` fields in the `GET /info` API response. For API version v1.48 and older, the fields are still included in the response, but always `null`. In API version v1.49 and later, the fields will be omitted entirely. [moby/moby#49065](https://github.com/moby/moby/pull/49065)
+- Go-SDK: Deprecate `registry.ServiceOptions.AllowNondistributableArtifacts` field. [moby/moby#49065](https://github.com/moby/moby/pull/49065)
+- Go-SDK: The `BridgeNfIptables`, `BridgeNfIp6tables` fields in `api/types/system.Info` and `BridgeNFCallIPTablesDisabled`, `BridgeNFCallIP6TablesDisabled` fields in `pkg/sysinfo.SysInfo` are deprecated and will be removed in the next release. [moby/moby#49114](https://github.com/moby/moby/pull/49114)
+- Go-SDK: `client`: Deprecate `CommonAPIClient` interface in favor of the `APIClient` interface. The `CommonAPIClient` will be changed to an alias for `APIClient` in the next release, and removed in the release after. [moby/moby#49388](https://github.com/moby/moby/pull/49388)
+- Go-SDK: `client`: Deprecate `ErrorConnectionFailed` helper. This function was only used internally, and will be removed in the next release. [moby/moby#49389](https://github.com/moby/moby/pull/49389)
+- Go-SDK: `pkg/ioutils`: Deprecate `NewAtomicFileWriter`, `AtomicWriteFile`, `AtomicWriteSet`, `NewAtomicWriteSet` in favor of `pkg/atomicwriter` equivalents. [moby/moby#49171](https://github.com/moby/moby/pull/49171)
+- Go-SDK: `pkg/sysinfo`: Deprecate `NumCPU`. This utility has the same behavior as `runtime.NumCPU`. [moby/moby#49241](https://github.com/moby/moby/pull/49241)
+- Go-SDK: `pkg/system`: Deprecate `MkdirAll`.
This function provided custom handling for Windows GUID volume paths. Handling for such paths is now supported by the Go standard library in go1.22 and newer, and this function is now an alias for `os.MkdirAll`, which should be used instead. This alias will be removed in the next release. [moby/moby#49162](https://github.com/moby/moby/pull/49162)
+- Go-SDK: Deprecate `pkg/parsers.ParseKeyValueOpt`. [moby/moby#49177](https://github.com/moby/moby/pull/49177)
+- Go-SDK: Deprecate `pkg/parsers.ParseUintListMaximum`, `pkg/parsers.ParseUintList`. These utilities were only used internally and will be removed in the next release. [moby/moby#49222](https://github.com/moby/moby/pull/49222)
+- Go-SDK: Deprecate `api/types.IDResponse` in favor of `container.CommitResponse` and `container.ExecCreateResponse`, which are currently aliases, but might become distinct types in a future release. This type will be removed in the next release. [moby/moby#49446](https://github.com/moby/moby/pull/49446)
+- Go-SDK: Deprecate `api/types/container.ContainerUpdateOKBody` in favor of `UpdateResponse`. This type will be removed in the next release. [moby/moby#49442](https://github.com/moby/moby/pull/49442)
+- Go-SDK: Deprecate `api/types/container.ContainerTopOKBody` in favor of `TopResponse`. This type will be removed in the next release. [moby/moby#49442](https://github.com/moby/moby/pull/49442)
+- Go-SDK: `pkg/jsonmessage`: Fix deprecation of `ProgressMessage`, `ErrorMessage`, which were deprecated in Docker v0.6.0 and v0.7.1 respectively. [moby/moby#49447](https://github.com/moby/moby/pull/49447)
+- Move `GraphDriverData` from `api/types` to `api/types/storage`. The old type is deprecated and will be removed in the next release. [moby/moby#48108](https://github.com/moby/moby/pull/48108)
+- Move `RequestPrivilegeFunc` from `api/types` to `api/types/registry`. The old type is deprecated and will be removed in the next release. [moby/moby#48119](https://github.com/moby/moby/pull/48119)
+- Move from `api/types` to `api/types/container`: `NetworkSettings`, `NetworkSettingsBase`, `DefaultNetworkSettings`, `SummaryNetworkSettings`, `Health`, `HealthcheckResult`, `NoHealthcheck`, `Starting`, `Healthy`, and `Unhealthy` constants, `MountPoint`, `Port`, `ContainerState`, `Container`, `ContainerJSONBase`, `ContainerJSON`, `ContainerNode`. The old types are deprecated and will be removed in the next release. [moby/moby#48108](https://github.com/moby/moby/pull/48108)
+- Move from `api/types` to `api/types/image`: `ImageInspect`, `RootFS`. The old types are deprecated and will be removed in the next release. [moby/moby#48108](https://github.com/moby/moby/pull/48108)
+- `ContainerdCommit.Expected`, `RuncCommit.Expected`, and `InitCommit.Expected` fields in the `GET /info` endpoint are deprecated and will be omitted in API v1.49. [moby/moby#48478](https://github.com/moby/moby/pull/48478)
+- `api/types/registry`: Deprecate `ServiceConfig.AllowNondistributableArtifactsCIDRs` and `ServiceConfig.AllowNondistributableArtifactsHostnames` fields. These fields will be removed in the next release. [moby/moby#49065](https://github.com/moby/moby/pull/49065)
+- The `api/types/system/Commit.Expected` field is deprecated and should no longer be used. [moby/moby#48478](https://github.com/moby/moby/pull/48478)
+- `daemon/graphdriver`: Deprecate `GetDriver()`. [moby/moby#48079](https://github.com/moby/moby/pull/48079)
+- `libnetwork/iptables`: Deprecate `Passthrough`. This function was only used internally, and will be removed in the next release.
[moby/moby#49115](https://github.com/moby/moby/pull/49115)
+- The `pkg/directory.Size()` function is deprecated, and will be removed in the next release. [moby/moby#48057](https://github.com/moby/moby/pull/48057)
+- `registry`: Deprecate `APIEndpoint.TrimHostName`. The hostname is now trimmed unconditionally for remote names. This field will be removed in the next release. [moby/moby#49005](https://github.com/moby/moby/pull/49005)
+- Deprecate the `allow-nondistributable-artifacts` field in `daemon.json`. Setting this option no longer takes effect, but a deprecation warning is logged to raise awareness about the deprecation. This warning is planned to become an error in the next release. [moby/moby#49065](https://github.com/moby/moby/pull/49065)
diff --git a/content/manuals/engine/release-notes/prior-releases.md b/content/manuals/engine/release-notes/prior-releases.md
index ffe5dac7966e..e0952545b082 100644
--- a/content/manuals/engine/release-notes/prior-releases.md
+++ b/content/manuals/engine/release-notes/prior-releases.md
@@ -328,7 +328,7 @@ If you are currently using the `--ipv6` option _without_ specifying the
`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
following message:

-```none
+```text
Error starting daemon: Error initializing network controller: Error
creating default "bridge" network: failed to parse pool request
for address space "LocalDefault" pool " subpool ":
@@ -344,7 +344,7 @@ In a similar way, if you specify the `--ipv6` flag when creating a network
with the default IPAM driver, without providing an IPv6 `--subnet`, network
creation will fail with the following message:

-```none
+```text
Error response from daemon: failed to parse pool request
for address space "LocalDefault" pool "" subpool "":
could not find an available, non-overlapping IPv6 address pool among
@@ -397,7 +397,7 @@ If you are currently using the `--ipv6` option _without_ specifying the
`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
following message:

-```none
+```text
Error starting daemon: Error initializing network controller: Error
creating default "bridge" network: failed to parse pool request
for address space "LocalDefault" pool " subpool ":
@@ -413,7 +413,7 @@ In a similar way, if you specify the `--ipv6` flag when creating a network
with the default IPAM driver, without providing an IPv6 `--subnet`, network
creation will fail with the following message:

-```none
+```text
Error response from daemon: failed to parse pool request
for address space "LocalDefault" pool "" subpool "":
could not find an available, non-overlapping IPv6 address pool among
diff --git a/content/manuals/engine/security/rootless.md b/content/manuals/engine/security/rootless.md
index 6f42a9296b83..f267d59f4c52 100644
--- a/content/manuals/engine/security/rootless.md
+++ b/content/manuals/engine/security/rootless.md
@@ -490,7 +490,7 @@ The value is automatically set to `/run/user/$UID` and cleaned up on every logou

**`systemctl --user` fails with "Failed to connect to bus: No such file or directory"**

-This error occurs mostly when you switch from the root user to an non-root user with `sudo`:
+This error occurs mostly when you switch from the root user to a non-root user with `sudo`:

```console
# sudo -iu testuser
diff --git a/content/manuals/engine/security/trust/trust_delegation.md b/content/manuals/engine/security/trust/trust_delegation.md
index 1c0d25012032..df4b8d933ada 100644
--- a/content/manuals/engine/security/trust/trust_delegation.md
+++
b/content/manuals/engine/security/trust/trust_delegation.md
@@ -122,7 +122,7 @@ Successfully generated and loaded private key. Corresponding public key availabl

### Manually generating keys

-If you need to manually generate a private key (either RSA or ECDSA) and a x509
+If you need to manually generate a private key (either RSA or ECDSA) and an X.509
certificate containing the public key, you can use local tools like openssl or
cfssl along with a local or company-wide Certificate Authority.

diff --git a/content/manuals/engine/security/trust/trust_sandbox.md b/content/manuals/engine/security/trust/trust_sandbox.md
index 3c94407c7866..8b159b8691cf 100644
--- a/content/manuals/engine/security/trust/trust_sandbox.md
+++ b/content/manuals/engine/security/trust/trust_sandbox.md
@@ -67,10 +67,10 @@ the `trustsandbox` container, the Notary server, and the Registry server.
        $ mkdir trustsandbox
        $ cd trustsandbox

-2. Create a file called `compose.yml` with your favorite editor. For example, using vim:
+2. Create a file called `compose.yaml` with your favorite editor. For example, using vim:

-        $ touch compose.yml
-        $ vim compose.yml
+        $ touch compose.yaml
+        $ vim compose.yaml

3. Add the following to the new file.

diff --git a/content/manuals/engine/security/userns-remap.md b/content/manuals/engine/security/userns-remap.md
index 57dfe29986bc..1d981dc109f2 100644
--- a/content/manuals/engine/security/userns-remap.md
+++ b/content/manuals/engine/security/userns-remap.md
@@ -22,7 +22,7 @@ The remapping itself is handled by two files: `/etc/subuid` and `/etc/subgid`.
Each file works the same, but one is concerned with the user ID range, and the
other with the group ID range. Consider the following entry in `/etc/subuid`:

-```none
+```text
testuser:231072:65536
```

@@ -93,7 +93,7 @@ avoid these situations.
   and a maximum number of UIDs or GIDs available to the user. For instance,
   given the following entry:

-   ```none
+   ```text
   testuser:231072:65536
   ```

diff --git a/content/manuals/engine/storage/drivers/_index.md b/content/manuals/engine/storage/drivers/_index.md
index 57bf355e7b81..c6241730936c 100644
--- a/content/manuals/engine/storage/drivers/_index.md
+++ b/content/manuals/engine/storage/drivers/_index.md
@@ -6,6 +6,7 @@ weight: 40
aliases:
  - /storage/storagedriver/imagesandcontainers/
  - /storage/storagedriver/
+  - /engine/userguide/storagedriver/imagesandcontainers/
---

To use storage drivers effectively, it's important to know how Docker builds and
diff --git a/content/manuals/engine/storage/drivers/btrfs-driver.md b/content/manuals/engine/storage/drivers/btrfs-driver.md
index 720f6d59be8f..ecfa3179ea34 100644
--- a/content/manuals/engine/storage/drivers/btrfs-driver.md
+++ b/content/manuals/engine/storage/drivers/btrfs-driver.md
@@ -6,6 +6,15 @@ aliases:
  - /storage/storagedriver/btrfs-driver/
---

+> [!IMPORTANT]
+>
+> In most cases you should use the `overlay2` storage driver. It's not
+> required to use the `btrfs` storage driver simply because your system uses
+> Btrfs as its root filesystem.
+>
+> The Btrfs driver has known issues. See [Moby issue #27653](https://github.com/moby/moby/issues/27653)
+> for more information.
+
Btrfs is a copy-on-write filesystem that supports many advanced storage
technologies, making it a good fit for Docker. Btrfs is included in the
mainline Linux kernel.
diff --git a/content/manuals/engine/storage/drivers/device-mapper-driver.md b/content/manuals/engine/storage/drivers/device-mapper-driver.md
index 7eb9de9bb6ab..c9aa3f7d655c 100644
--- a/content/manuals/engine/storage/drivers/device-mapper-driver.md
+++ b/content/manuals/engine/storage/drivers/device-mapper-driver.md
@@ -297,7 +297,7 @@ assumes that the Docker daemon is in the `stopped` state.
   The example below adds 20% more capacity when the disk usage reaches 80%.

-   ```none
+   ```text
   activation {
     thin_pool_autoextend_threshold=80
     thin_pool_autoextend_percent=20
diff --git a/content/manuals/engine/storage/drivers/overlayfs-driver.md b/content/manuals/engine/storage/drivers/overlayfs-driver.md
index 2d28af93084e..5064fb0c32b9 100644
--- a/content/manuals/engine/storage/drivers/overlayfs-driver.md
+++ b/content/manuals/engine/storage/drivers/overlayfs-driver.md
@@ -104,7 +104,7 @@ its compatibility with different backing filesystems.
OverlayFS layers two directories on a single Linux host and presents them as a
single directory. These directories are called layers, and the unification
process is referred to as a union mount. OverlayFS refers to the lower directory
-as `lowerdir` and the upper directory a `upperdir`. The unified view is exposed
+as `lowerdir` and the upper directory as `upperdir`. The unified view is exposed
through its own directory called `merged`.

The `overlay2` driver natively supports up to 128 lower OverlayFS layers. This
diff --git a/content/manuals/engine/storage/drivers/select-storage-driver.md b/content/manuals/engine/storage/drivers/select-storage-driver.md
index fe441ce5fc83..18785775c510 100644
--- a/content/manuals/engine/storage/drivers/select-storage-driver.md
+++ b/content/manuals/engine/storage/drivers/select-storage-driver.md
@@ -108,13 +108,20 @@ With regard to Docker, the backing filesystem is the filesystem where
`/var/lib/docker/` is located. Some storage drivers only work with specific
backing filesystems.

-| Storage driver   | Supported backing filesystems |
-| :--------------- | :---------------------------- |
-| `overlay2`       | `xfs` with ftype=1, `ext4`    |
-| `fuse-overlayfs` | any filesystem                |
-| `btrfs`          | `btrfs`                       |
-| `zfs`            | `zfs`                         |
-| `vfs`            | any filesystem                |
+| Storage driver   | Supported backing filesystems                 |
+| :--------------- | :-------------------------------------------- |
+| `overlay2`       | `xfs` with ftype=1, `ext4`, `btrfs`, and more |
+| `fuse-overlayfs` | any filesystem                                |
+| `btrfs`          | `btrfs`                                       |
+| `zfs`            | `zfs`                                         |
+| `vfs`            | any filesystem                                |
+
+> [!NOTE]
+>
+> Most filesystems should work if they have the required features.
+> Consult [OverlayFS](https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html)
+> for more information.
+
## Other considerations

diff --git a/content/manuals/engine/storage/tmpfs.md b/content/manuals/engine/storage/tmpfs.md
index b4e186acb3d0..29473ada2bad 100644
--- a/content/manuals/engine/storage/tmpfs.md
+++ b/content/manuals/engine/storage/tmpfs.md
@@ -135,7 +135,7 @@ Valid options for `--mount type=tmpfs` include:

| Option                         | Description                                                                                                              |
| :----------------------------- | :----------------------------------------------------------------------------------------------------------------------- |
-| `destination`, `dst`, `target` | Size of the tmpfs mount in bytes. If unset, the default maximum size of a tmpfs volume is 50% of the host's total RAM.  |
+| `destination`, `dst`, `target` | Container path to mount into a tmpfs.                                                                                    |
| `tmpfs-size`                   | Size of the tmpfs mount in bytes. If unset, the default maximum size of a tmpfs volume is 50% of the host's total RAM.   |
| `tmpfs-mode`                   | File mode of the tmpfs in octal. For instance, `700` or `0770`. Defaults to `1777` or world-writable.                    |
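+
+For example, a minimal sketch combining these options (the container name and values are illustrative; `tmpfs-size` is in bytes):
+
+```console
+$ docker run -d --name tmptest --mount type=tmpfs,destination=/app,tmpfs-size=104857600,tmpfs-mode=0770 nginx:latest
+```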
diff --git a/content/manuals/engine/storage/volumes.md b/content/manuals/engine/storage/volumes.md
index c99eed561d8c..bc0eef21b4a3 100644
--- a/content/manuals/engine/storage/volumes.md
+++ b/content/manuals/engine/storage/volumes.md
@@ -13,6 +13,7 @@ aliases:
  - /engine/userguide/dockervolumes/
  - /engine/admin/volumes/volumes/
  - /storage/volumes/
+  - /engine/admin/volumes/backing-up/
---

Volumes are persistent data stores for containers, created and managed by
@@ -56,7 +57,7 @@ If your container generates non-persistent state data, consider using a
increase the container's performance by avoiding writing into the container's
writable layer.

-Volumes use `rprivate` bind propagation, and bind propagation isn't
+Volumes use `rprivate` (recursive private) bind propagation, and bind propagation isn't
configurable for volumes.

## A volume's lifecycle

@@ -493,7 +494,7 @@ $ docker run --rm \
    alpine mkdir -p /logs/app1 /logs/app2
$ docker run -d \
    --name=app1 \
-   --mount src=logs,dst=/var/log/app1/,volume-subpath=app1 \
+   --mount src=logs,dst=/var/log/app1,volume-subpath=app1 \
    app1:latest
$ docker run -d \
    --name=app2 \
@@ -525,7 +526,7 @@ store data in the cloud, without changing the application logic.

When you create a volume using `docker volume create`, or when you start a
container which uses a not-yet-created volume, you can specify a volume driver.
-The following examples use the `vieux/sshfs` volume driver, first when creating
+The following examples use the `rclone/docker-volume-rclone` volume driver, first when creating
a standalone volume, and then when starting a container which creates a new
volume.

@@ -551,30 +552,32 @@ volume.

The following example assumes that you have two nodes, the first of which is a
Docker host and can connect to the second node using SSH.

-On the Docker host, install the `vieux/sshfs` plugin:
+On the Docker host, install the `rclone/docker-volume-rclone` plugin:

```console
-$ docker plugin install --grant-all-permissions vieux/sshfs
+$ docker plugin install --grant-all-permissions rclone/docker-volume-rclone --aliases rclone
```

### Create a volume using a volume driver

-This example specifies an SSH password, but if the two hosts have shared keys
-configured, you can exclude the password. Each volume driver may have zero or more
+This example mounts the `/remote` directory on host `1.2.3.4` into a
+volume named `rclonevolume`. Each volume driver may have zero or more
configurable options. You specify each of them using an `-o` flag.

```console
-$ docker volume create --driver vieux/sshfs \
-  -o sshcmd=test@node2:/home/test \
-  -o password=testpassword \
-  sshvolume
+$ docker volume create \
+  -d rclone \
+  --name rclonevolume \
+  -o type=sftp \
+  -o path=remote \
+  -o sftp-host=1.2.3.4 \
+  -o sftp-user=user \
+  -o "sftp-password=$(cat file_containing_password_for_remote_host)"
```

-### Start a container which creates a volume using a volume driver
+This volume can now be mounted into containers.

-The following example specifies an SSH password. However, if the two hosts have
-shared keys configured, you can exclude the password.
-Each volume driver may have zero or more configurable options.
+### Start a container which creates a volume using a volume driver

> [!NOTE]
>
@@ -583,8 +586,8 @@
```console
$ docker run -d \
-  --name sshfs-container \
-  --mount type=volume,volume-driver=vieux/sshfs,src=sshvolume,target=/app,volume-opt=sshcmd=test@node2:/home/test,volume-opt=password=testpassword \
+  --name rclone-container \
+  --mount type=volume,volume-driver=rclone,src=rclonevolume,target=/app,volume-opt=type=sftp,volume-opt=path=remote,volume-opt=sftp-host=1.2.3.4,volume-opt=sftp-user=user,"volume-opt=sftp-password=$(cat file_containing_password_for_remote_host)" \
   nginx:latest
```

@@ -622,7 +625,7 @@ $ docker volume create \
    --opt type=cifs \
    --opt device=//uxxxxx.your-server.de/backup \
    --opt o=addr=uxxxxx.your-server.de,username=uxxxxxxx,password=*****,file_mode=0777,dir_mode=0777 \
-   --name cif-volume
+   --name cifs-volume
```

The `addr` option is required if you specify a hostname instead of an IP.
diff --git a/content/manuals/engine/swarm/_index.md b/content/manuals/engine/swarm/_index.md
index 8a2863ce70d1..0635ed1ce453 100644
--- a/content/manuals/engine/swarm/_index.md
+++ b/content/manuals/engine/swarm/_index.md
@@ -46,7 +46,7 @@ aliases:
  - /swarm/swarm_at_scale/troubleshoot/
---

-{{< include "swarm-mode.md" >}}
+{{% include "swarm-mode.md" %}}

Current versions of Docker include Swarm mode for natively managing a cluster
of Docker Engines called a swarm. Use the Docker CLI to create a swarm, deploy
diff --git a/content/manuals/engine/swarm/admin_guide.md b/content/manuals/engine/swarm/admin_guide.md
index d63579a8442f..fb75324fd736 100644
--- a/content/manuals/engine/swarm/admin_guide.md
+++ b/content/manuals/engine/swarm/admin_guide.md
@@ -221,7 +221,7 @@ the `docker node rm` command.
If a node becomes unreachable, unresponsive, or compromised you can forcefully
remove the node without shutting it down by passing the `--force` flag. For
instance, if `node9` becomes compromised:

-```none
+```console
$ docker node rm node9

Error response from daemon: rpc error: code = 9 desc = node node9 is not down and can't be removed
@@ -338,7 +338,7 @@ If you lose the quorum of managers, you cannot administer the swarm. If you have
lost the quorum and you attempt to perform any management operation on the
swarm, an error occurs:

-```none
+```text
Error response from daemon: rpc error: code = 4 desc = context deadline exceeded
```

diff --git a/content/manuals/engine/swarm/configs.md b/content/manuals/engine/swarm/configs.md
index 7d2d5acba889..8585daf749e1 100644
--- a/content/manuals/engine/swarm/configs.md
+++ b/content/manuals/engine/swarm/configs.md
@@ -216,7 +216,7 @@ real-world example, continue to
   to the config. The container ID is different, because the `service update`
   command redeploys the service.

-   ```none
+   ```console
   $ docker container exec -it $(docker ps --filter name=redis -q) cat /my-config

   cat: can't open '/my-config': No such file or directory
@@ -248,7 +248,7 @@ This example assumes that you have PowerShell installed.
   ```

-
+
2. If you have not already done so, initialize or join the swarm.

   ```powershell
@@ -373,7 +373,7 @@ generate the site key and certificate, name the files `site.key` and
   the following contents into it. This constrains the root CA to only sign
   leaf certificates and not intermediate CAs.

-   ```none
+   ```ini
   [root_ca]
   basicConstraints = critical,CA:TRUE,pathlen:1
   keyUsage = critical, nonRepudiation, cRLSign, keyCertSign
@@ -407,7 +407,7 @@ generate the site key and certificate, name the files `site.key` and
   certificate so that it can only be used to authenticate a server and can't
   be used to sign certificates.
- ```none + ```ini [server] authorityKeyIdentifier=keyid,issuer basicConstraints = critical,CA:FALSE @@ -438,7 +438,7 @@ generate the site key and certificate, name the files `site.key` and In the current directory, create a new file called `site.conf` with the following contents: - ```none + ```nginx server { listen 443 ssl; server_name localhost; @@ -616,7 +616,7 @@ configuration file. 1. Edit the `site.conf` file locally. Add `index.php` to the `index` line, and save the file. - ```none + ```nginx server { listen 443 ssl; server_name localhost; diff --git a/content/manuals/engine/swarm/how-swarm-mode-works/pki.md b/content/manuals/engine/swarm/how-swarm-mode-works/pki.md index d0ba71f22a40..6ed7821ce2c0 100644 --- a/content/manuals/engine/swarm/how-swarm-mode-works/pki.md +++ b/content/manuals/engine/swarm/how-swarm-mode-works/pki.md @@ -36,7 +36,7 @@ communications using a minimum of TLS 1.2. The example below shows the information from a certificate from a worker node: -```none +```text Certificate: Data: Version: 3 (0x2) diff --git a/content/manuals/engine/swarm/services.md b/content/manuals/engine/swarm/services.md index 711639b7866d..a0164b919061 100644 --- a/content/manuals/engine/swarm/services.md +++ b/content/manuals/engine/swarm/services.md @@ -318,7 +318,7 @@ node is responsible for resolving the tag to a digest, and different nodes may use different versions of the image. If this happens, a warning like the following is logged, substituting the placeholders for real information. -```none +```text unable to pin image to digest: ``` @@ -712,7 +712,7 @@ $ docker service create \ nginx ``` -You can also use the `constraint` service-level key in a `compose.yml` +You can also use the `constraint` service-level key in a `compose.yaml` file. If you specify multiple placement constraints, the service only deploys onto diff --git a/content/manuals/engine/swarm/stack-deploy.md b/content/manuals/engine/swarm/stack-deploy.md index 414d85752f0f..373193f883bc 100644 --- a/content/manuals/engine/swarm/stack-deploy.md +++ b/content/manuals/engine/swarm/stack-deploy.md @@ -8,7 +8,7 @@ When running Docker Engine in swarm mode, you can use `docker stack deploy` to deploy a complete application stack to the swarm. The `deploy` command accepts a stack description in the form of a [Compose file](/reference/compose-file/legacy-versions.md). -{{< include "swarm-compose-compat.md" >}} +{{% include "swarm-compose-compat.md" %}} To run through this tutorial, you need: @@ -95,7 +95,7 @@ counter whenever you visit it. 3. Create a file called `requirements.txt` and paste these two lines in: - ```none + ```text flask redis ``` @@ -111,7 +111,7 @@ counter whenever you visit it. CMD ["python", "app.py"] ``` -5. Create a file called `compose.yml` and paste this in: +5. Create a file called `compose.yaml` and paste this in: ```yaml services: @@ -220,7 +220,7 @@ The stack is now ready to be deployed. 1. Create the stack with `docker stack deploy`: ```console - $ docker stack deploy --compose-file compose.yml stackdemo + $ docker stack deploy --compose-file compose.yaml stackdemo Ignoring unsupported options: build diff --git a/content/manuals/engine/swarm/swarm-mode.md b/content/manuals/engine/swarm/swarm-mode.md index 6bff4a226ba4..aba6ddde2a0f 100644 --- a/content/manuals/engine/swarm/swarm-mode.md +++ b/content/manuals/engine/swarm/swarm-mode.md @@ -36,7 +36,7 @@ as follows: * Creates a swarm named `default`. * Designates the current node as a leader manager node for the swarm. 
* Names the node with the machine hostname.
-* Configures the manager to listen on an active network interface on port `2377``.
+* Configures the manager to listen on an active network interface on port `2377`.
* Sets the current node to `Active` availability, meaning it can receive tasks
  from the scheduler.
* Starts an internal distributed data store for Engines participating in the
diff --git a/content/manuals/enterprise/enterprise-deployment/_index.md b/content/manuals/enterprise/enterprise-deployment/_index.md
new file mode 100644
index 000000000000..8324d70ef201
--- /dev/null
+++ b/content/manuals/enterprise/enterprise-deployment/_index.md
@@ -0,0 +1,35 @@
+---
+title: Deploy Docker Desktop
+weight: 10
+description: If you're an IT admin, learn how to deploy Docker Desktop at scale
+keywords: msi, docker desktop, windows, installation, mac, pkg, enterprise
+params:
+  sidebar:
+    group: Enterprise
+grid:
+- title: MSI installer
+  description: Learn how to install Docker Desktop with the MSI installer.
+  link: /enterprise/enterprise-deployment/msi-install-and-configure/
+- title: PKG installer
+  description: Learn how to install Docker Desktop with the PKG installer.
+  link: /enterprise/enterprise-deployment/pkg-install-and-configure/
+- title: MS Store
+  description: Learn how to install Docker Desktop through the Microsoft Store.
+  link: /enterprise/enterprise-deployment/ms-store/
+- title: Deploy with Intune
+  description: Learn how to deploy Docker Desktop on Windows and macOS devices using Microsoft Intune.
+  link: /enterprise/enterprise-deployment/use-intune/
+- title: Deploy with Jamf Pro
+  description: Learn how to deploy Docker Desktop for Mac using Jamf Pro.
+  link: /enterprise/enterprise-deployment/use-jamf-pro/
+- title: Docker Desktop for Microsoft Dev Box
+  description: Install Docker Desktop for Microsoft Dev Box via the Microsoft Azure Marketplace.
+  link: /enterprise/enterprise-deployment/dev-box/
+- title: FAQs
+  description: Common questions when deploying Docker Desktop.
+  link: /enterprise/enterprise-deployment/faq/
+---
+
+Docker Desktop supports scalable deployment options tailored for enterprise IT environments. Whether you're rolling out Docker across hundreds of developer workstations or enforcing consistent configuration through MDM solutions like Intune or Jamf, this section provides everything you need to install, configure, and manage Docker Desktop in a secure, repeatable way. Learn how to use MSI and PKG installers, configure default settings, control updates, and ensure compliance with your organization's policies across Windows, macOS, and Linux systems.
+
+{{< grid >}}
\ No newline at end of file
diff --git a/content/manuals/enterprise/enterprise-deployment/dev-box.md b/content/manuals/enterprise/enterprise-deployment/dev-box.md
new file mode 100644
index 000000000000..f7c2821c2fd0
--- /dev/null
+++ b/content/manuals/enterprise/enterprise-deployment/dev-box.md
@@ -0,0 +1,60 @@
+---
+Title: Docker Desktop in Microsoft Dev Box
+linkTitle: Microsoft Dev Box
+description: Learn about the benefits of and how to set up Docker Desktop in Microsoft Dev Box
+keywords: desktop, docker, windows, microsoft dev box
+weight: 60
+aliases:
+  - /desktop/features/dev-box/
+  - /desktop/setup/install/enterprise-deployment/dev-box/
+---
+
+Docker Desktop is available as a pre-configured image in the Microsoft Azure Marketplace for use with Microsoft Dev Box, allowing developers to quickly set up consistent development environments in the cloud.
+
+Microsoft Dev Box provides cloud-based, pre-configured developer workstations that allow you to code, build, and test applications without configuring a local development environment. The Docker Desktop image for Microsoft Dev Box comes with Docker Desktop and its dependencies pre-installed, giving you a ready-to-use containerized development environment.
+
+## Key benefits
+
+- Pre-configured environment: Docker Desktop, WSL2, and other requirements come pre-installed and configured.
+- Consistent development: Ensure all team members work with the same Docker environment.
+- Powerful resources: Access more compute power and storage than might be available on local machines.
+- State persistence: Dev Box maintains your state between sessions, similar to hibernating a local machine.
+- Seamless licensing: Use your existing Docker subscription or purchase a new one directly through Azure Marketplace.
+
+## Setup
+
+### Prerequisites
+
+- An Azure subscription
+- Access to Microsoft Dev Box
+- A Docker subscription (Pro, Team, or Business). You can use Docker Desktop in Microsoft Dev Box with any of the following subscription options:
+  - An existing or new Docker subscription
+  - A new Docker subscription purchased through Azure Marketplace
+  - A Docker Business subscription with SSO configured for your organization
+
+### Set up Docker Desktop in Dev Box
+
+1. Navigate to the [Docker Desktop for Microsoft Dev Box](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/dockerinc1694120899427.devbox_azuremachine?tab=Overview) listing in Azure Marketplace.
+2. Select **Get It Now** to add the virtual machine image to your subscription.
+3. Follow the Azure workflow to complete the setup.
+4. Use the image to create VMs, assign to Dev Centers, or create Dev Box Pools according to your organization's setup.
+
+### Activate Docker Desktop
+
+Once your Dev Box is provisioned with the Docker Desktop image:
+
+1. Start your Dev Box instance.
+2. Launch Docker Desktop.
+3. Sign in with your Docker ID.
+
+## Support
+
+For issues related to:
+
+- Docker Desktop configuration, usage, or licensing: Create a support ticket through [Docker Support](https://hub.docker.com/support).
+- Dev Box creation, Azure portal configuration, or networking: Contact Azure Support.
+
+## Limitations
+
+- Microsoft Dev Box is currently only available on Windows 10 and 11 (Linux VMs are not supported).
+- Performance might vary based on your Dev Box configuration and network conditions.
diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/faq.md b/content/manuals/enterprise/enterprise-deployment/faq.md
similarity index 64%
rename from content/manuals/desktop/setup/install/enterprise-deployment/faq.md
rename to content/manuals/enterprise/enterprise-deployment/faq.md
index 979485823707..be742a0d6390 100644
--- a/content/manuals/desktop/setup/install/enterprise-deployment/faq.md
+++ b/content/manuals/enterprise/enterprise-deployment/faq.md
@@ -1,20 +1,27 @@
---
title: Enterprise deployment FAQs
+linkTitle: FAQs
description: Frequently asked questions for deploying Docker Desktop at scale
keywords: msi, deploy, docker desktop, faqs, pkg, mdm, jamf, intune, windows, mac, enterprise, admin
tags: [FAQ, admin]
+weight: 70
aliases:
-- /desktop/install/msi/faq/
-- /desktop/setup/install/msi/faq/
+  - /desktop/install/msi/faq/
+  - /desktop/setup/install/msi/faq/
+  - /desktop/setup/install/enterprise-deployment/faq/
---

## MSI

+Common questions about installing Docker Desktop using the MSI installer.
+
+
### What happens to user data if they have an older Docker Desktop installation (i.e. `.exe`)?

-If they have an older `.exe` installation, users must [uninstall](/manuals/desktop/uninstall.md) this version before using the new MSI version. This deletes all Docker containers, images, volumes, and other Docker-related data local to the machine, and removes the files generated by the application. For older versions, users should [backup](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) any containers that they want to keep.
+Users must [uninstall](/manuals/desktop/uninstall.md) older `.exe` installations before using the new MSI version. This deletes all Docker containers, images, volumes, and other Docker-related data local to the machine, and removes the files generated by Docker Desktop.
+
+To preserve existing data before uninstalling, users should [back up](/manuals/desktop/settings-and-maintenance/backup-and-restore.md) their containers and volumes.

-For Docker Desktop versions 4.30 and later of the `exe` installer, a `-keep-data` flag is available. It removes Docker Desktop but keeps underlying data, such as the VMs that run containers.
+For Docker Desktop 4.30 and later, the `.exe` installer includes a `-keep-data` flag that removes Docker Desktop while preserving underlying resources such as the container VMs:

```powershell
& 'C:\Program Files\Docker\Docker\Docker Desktop Installer.exe' uninstall -keep-data
@@ -22,11 +29,11 @@

### What happens if the user's machine has an older `.exe` installation?

-The new MSI installer checks if a previous version was installed and doesn't proceed with the installation. Instead, it prompts the user to uninstall their current/old version first, before retrying to install the MSI version.
+The MSI installer detects older `.exe` installations and blocks the installation until the previous version is uninstalled. It prompts the user to uninstall their current version first, before retrying to install the MSI version.

### My installation failed, how do I find out what happened?

-MSI installations can sometimes fail unexpectedly and not provide users with much information about what went wrong.
+MSI installations might fail silently, offering little diagnostic feedback.

To debug a failed installation, run the install again with verbose logging enabled:

@@ -78,4 +85,18 @@ Add-LocalGroupMember -Group $Group -Member $CurrentUser

> [!NOTE]
>
-> After adding a new user to the `docker-users` group, the user must sign out and then sign back in for the changes to take effect.
\ No newline at end of file
+> After adding a new user to the `docker-users` group, the user must sign out and then sign back in for the changes to take effect.
+
+## MDM
+
+Common questions about deploying Docker Desktop using mobile device management
+(MDM) tools such as Jamf, Intune, or Workspace ONE.
+
+### Why doesn't my MDM tool apply all Docker Desktop configuration settings at once?
+
+Some MDM tools, such as Workspace ONE, might not support applying multiple
+configuration settings in a single XML file. In these cases, you might need to
+deploy each setting in a separate XML file.
+
+Refer to your MDM provider's documentation for specific deployment
+requirements or limitations.
\ No newline at end of file
diff --git a/content/manuals/enterprise/enterprise-deployment/ms-store.md b/content/manuals/enterprise/enterprise-deployment/ms-store.md
new file mode 100644
index 000000000000..fab8e7609113
--- /dev/null
+++ b/content/manuals/enterprise/enterprise-deployment/ms-store.md
@@ -0,0 +1,47 @@
+---
+title: Install Docker Desktop from the Microsoft Store on Windows
+linkTitle: MS Store
+description: Install Docker Desktop for Windows through the Microsoft Store. Understand its update behavior and limitations.
+keywords: microsoft store, windows, docker desktop, install, deploy, configure, admin, mdm, intune, winget
+tags: [admin]
+weight: 30
+aliases:
+  - /desktop/setup/install/enterprise-deployment/ms-store/
+---
+
+You can deploy Docker Desktop for Windows through the [Microsoft Store](https://apps.microsoft.com/detail/xp8cbj40xlbwkx?hl=en-GB&gl=GB).
+
+The Microsoft Store version of Docker Desktop provides the same functionality as the standard installer but has a different update behavior depending on whether your developers install it themselves or installation is handled by an MDM tool such as Intune. This is described in the following section.
+
+Choose the installation method that best aligns with your environment's requirements and management practices.
+
+## Update behavior
+
+### Developer-managed installations
+
+For developers who install Docker Desktop directly:
+
+- The Microsoft Store does not automatically update Win32 apps like Docker Desktop for most users.
+- Only a subset of users (approximately 20%) might receive update notifications on the Microsoft Store page.
+- Most users must manually check for and apply updates within the Store.
+
+### Intune-managed installations
+
+In environments managed with Intune:
+- Intune checks for updates approximately every 8 hours.
+- When a new version is detected, Intune triggers a `winget` upgrade.
+- If appropriate policies are configured, updates can occur automatically without user intervention.
+- Updates are handled by Intune's management infrastructure rather than the Microsoft Store itself.
+
+## WSL considerations
+
+Docker Desktop for Windows integrates closely with WSL. When updating Docker Desktop installed from the Microsoft Store:
+- Make sure you have quit Docker Desktop and that it is no longer running so updates can complete successfully.
+- In some environments, virtual hard disk (VHDX) file locks might prevent the update from completing.
+
+## Recommendations for Intune management
+
+If using Intune to manage Docker Desktop for Windows:
+- Ensure your Intune policies are configured to handle application updates.
+- Be aware that the update process uses WinGet APIs rather than direct Store mechanisms.
+- Consider testing the update process in a controlled environment to verify proper functionality.
diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md b/content/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md
similarity index 72%
rename from content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md
rename to content/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md
index 9ddc34b2b6d9..d2fecfc1fef1 100644
--- a/content/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md
+++ b/content/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md
@@ -1,27 +1,28 @@
---
-title: Use the MSI installer
+title: MSI installer
description: Understand how to use the MSI installer. Also explore additional configuration options.
keywords: msi, windows, docker desktop, install, deploy, configure, admin, mdm
tags: [admin]
weight: 10
-aliases:
-- /desktop/install/msi/install-and-configure/
-- /desktop/setup/install/msi/install-and-configure/
+aliases:
+ - /desktop/install/msi/install-and-configure/
+ - /desktop/setup/install/msi/install-and-configure/
+ - /desktop/install/msi/
+ - /desktop/setup/install/msi/
+ - /desktop/setup/install/enterprise-deployment/msi-install-and-configure/
---
-> [!NOTE]
->
-> The MSI installer is available to all company and organization owners with a Business subscription and Docker Desktop version 4.32 and later.
+{{< summary-bar feature_name="MSI installer" >}}
The MSI package supports various MDM (Mobile Device Management) solutions, making it ideal for bulk installations and eliminating the need for manual setups by individual users. With this package, IT administrators can ensure standardized, policy-driven installations of Docker Desktop, enhancing efficiency and software management across their organizations.
## Install interactively
-1. In the [Docker Admin Console](http://admin.docker.com/), navigate to your organization.
-2. Under **Security and access**, select the **Deploy Docker Desktop** page.
-3. From the **Windows OS** tab, select the **Download MSI installer** button.
-4. Once downloaded, double-click `Docker Desktop Installer.msi` to run the installer.
-5. Once you've accepted the license agreement, you can choose the install location. By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`.
+1. In [Docker Home](https://app.docker.com), choose your organization.
+2. Select **Admin Console**, then **Enterprise deployment**.
+3. From the **Windows OS** tab, select the **Download MSI installer** button.
+4. Once downloaded, double-click `Docker Desktop Installer.msi` to run the installer.
+5. After accepting the license agreement, choose the install location. By default, Docker Desktop is installed at `C:\Program Files\Docker\Docker`.
6. Configure the Docker Desktop installation. You can:
- Create a desktop shortcut
@@ -30,21 +31,22 @@ The MSI package supports various MDM (Mobile Device Management) solutions, makin
- Disable Windows Container usage
- - Select the engine for Docker Desktop. Either WSL or Hyper-V. If your system only supports one of the two options, you won't be able to select which backend to use.
+ - Select the Docker Desktop backend: WSL or Hyper-V. If only one is supported by your system, you won't be able to choose. 7. Follow the instructions on the installation wizard to authorize the installer and proceed with the install. 8. When the installation is successful, select **Finish** to complete the installation process. -If your administrator account is different to your user account, you must add the user to the **docker-users** group: +If your administrator account is different from your user account, you must add the user to the **docker-users** group to access features that require higher privileges, such as creating and managing the Hyper-V VM, or using Windows containers: + 1. Run **Computer Management** as an **administrator**. -2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**. +2. Navigate to **Local Users and Groups** > **Groups** > **docker-users**. 3. Right-click to add the user to the group. 4. Sign out and sign back in for the changes to take effect. > [!NOTE] > -> When installing Docker Desktop with the MSI, in-app updates are automatically disabled. This feature ensures your organization maintains the required Docker Desktop version. For Docker Desktop installed with the .exe installer, in-app updates remain supported. +> When installing Docker Desktop with the MSI, in-app updates are automatically disabled. This ensures organizations can maintain version consistency and prevent unapproved updates. For Docker Desktop installed with the .exe installer, in-app updates remain supported. > -> Docker Desktop notifies you when an update is available. To update Docker Desktop, download the latest installer from the Docker Admin Console. Navigate to the **Deploy Docker Desktop** page > under **Security and access**. +> Docker Desktop notifies you when an update is available. To update Docker Desktop, download the latest installer from the Docker Admin Console. Navigate to the **Enterprise deployment** page. > > To keep up to date with new releases, check the [release notes](/manuals/desktop/release-notes.md) page. @@ -52,9 +54,9 @@ If your administrator account is different to your user account, you must add th This section covers command line installations of Docker Desktop using PowerShell. It provides common installation commands that you can run. You can also add additional arguments which are outlined in [configuration options](#configuration-options). -When installing Docker Desktop, you can choose between interactive or non-interactive installations. +When installing Docker Desktop, you can choose between interactive or non-interactive installations. -Interactive installations, without specifying `/quiet` or `/qn`, display the user interface and let you select your own properties. +Interactive installations, without specifying `/quiet` or `/qn`, display the user interface and let you select your own properties. When installing via the user interface it's possible to: @@ -72,37 +74,43 @@ Non-interactive installations are silent and any additional configuration must b > > Admin rights are required to run any of the following commands. 
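+
+If you wrap these commands in a deployment script, you can fail fast when the session isn't elevated. The following guard is an illustrative sketch, not part of the installer:
+
+```powershell
+# Abort early if this PowerShell session doesn't have admin rights.
+$identity  = [Security.Principal.WindowsIdentity]::GetCurrent()
+$principal = New-Object Security.Principal.WindowsPrincipal($identity)
+if (-not $principal.IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator)) {
+    Write-Error 'Run this script from an elevated PowerShell session.'
+    exit 1
+}
+```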
-#### Installing interactively with verbose logging
+#### Install interactively with verbose logging

```powershell
msiexec /i "DockerDesktop.msi" /L*V ".\msi.log"
```

-#### Installing interactively without verbose logging
+#### Install interactively without verbose logging

```powershell
msiexec /i "DockerDesktop.msi"
```

-#### Installing non-interactively with verbose logging
+#### Install non-interactively with verbose logging

```powershell
msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet
```

-#### Installing non-interactively and suppressing reboots
+#### Install non-interactively and suppress reboots

```powershell
msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart
```

-#### Installing non-interactively with admin settings
+#### Install non-interactively with admin settings
+
+```powershell
+msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart ADMINSETTINGS="{"configurationFileVersion":2,"enhancedContainerIsolation":{"value":true,"locked":false}}" ALLOWEDORG="your-organization"
+```
+
+#### Install interactively and allow users to switch to Windows containers without admin rights

```powershell
-msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart ADMINSETTINGS="{"configurationFileVersion":2,"enhancedContainerIsolation":{"value":true,"locked":false}}" ALLOWEDORG="docker"
+msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /quiet /norestart ALLOWEDORG="your-organization" ALWAYSRUNSERVICE=1
```

-#### Installing with the passive display option
+#### Install with the passive display option

You can use the `/passive` display option instead of `/quiet` when you want to perform a non-interactive installation but show a progress dialog.

@@ -116,8 +124,8 @@ msiexec /i "DockerDesktop.msi" /L*V ".\msi.log" /passive /norestart

> [!TIP]
>
-> Some useful tips to remember when creating a value that expects a JSON string as it’s value:
->
+> When creating a value that expects a JSON string:
+>
> - The property expects a JSON formatted string
> - The string should be wrapped in double quotes
> - The string shouldn't contain any whitespace
@@ -142,7 +150,7 @@ IdentifyingNumber Name
```
> [!NOTE]
>
-> This command can take some time to return, depending on the number of installed applications.
+> This command might take some time, depending on the number of installed applications.

`IdentifyingNumber` is the applications product code and can be used to uninstall Docker Desktop. For example:

```powershell
msiexec /x {10FC87E2-9145-4D7D-B493-2E99E8D8E103} /L*V ".\msi.log" /quiet
```

-#### Uninstalling interactively with verbose logging
+#### Uninstall interactively with verbose logging

```powershell
msiexec /x "DockerDesktop.msi" /L*V ".\msi.log"
```

-#### Uninstalling interactively without verbose logging
+#### Uninstall interactively without verbose logging

```powershell
msiexec /x "DockerDesktop.msi"
```

-#### Uninstalling non-interactively with verbose logging
+#### Uninstall non-interactively with verbose logging

```powershell
msiexec /x "DockerDesktop.msi" /L*V ".\msi.log" /quiet
```

-#### Uninstalling non-interactively without verbose logging
+#### Uninstall non-interactively without verbose logging

```powershell
msiexec /x "DockerDesktop.msi" /quiet
```

-### Configuration options 
+### Configuration options

> [!IMPORTANT]
>
@@ -184,7 +192,7 @@ msiexec /x "DockerDesktop.msi" /quiet

| :--- | :--- | :--- |
| `ENABLEDESKTOPSHORTCUT` | Creates a desktop shortcut. 
| 1 |
| `INSTALLFOLDER` | Specifies a custom location where Docker Desktop will be installed. | C:\Program Files\Docker |
-| `ADMINSETTINGS` | Automatically creates an `admin-settings.json` file which is used to [control certain Docker Desktop settings](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) on client machines within organizations. It must be used together with the `ALLOWEDORG` property. | None |
+| `ADMINSETTINGS` | Automatically creates an `admin-settings.json` file which is used to [control certain Docker Desktop settings](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) on client machines within organizations. It must be used together with the `ALLOWEDORG` property. | None |
| `ALLOWEDORG` | Requires the user to sign in and be part of the specified Docker Hub organization when running the application. This creates a registry key called `allowedOrgs` in `HKLM\Software\Policies\Docker\Docker Desktop`. | None |
| `ALWAYSRUNSERVICE` | Lets users switch to Windows containers without needing admin rights | 0 |
| `DISABLEWINDOWSCONTAINERS` | Disables the Windows containers integration | 0 |
@@ -202,11 +210,11 @@ msiexec /x "DockerDesktop.msi" /quiet

Additionally, you can also use `/norestart` or `/forcerestart` to control reboot behaviour.

-By default, the installer reboots the machine after a successful installation. When ran silently, the reboot is automatic and the user is not prompted.
+By default, the installer reboots the machine after a successful installation. When run silently, the reboot is automatic and the user is not prompted.

## Analytics

-The MSI installer collects anonymous usage statistics to better understand user behaviour and to improve the user experience by identifying and addressing issues or optimizing popular features.
+The MSI installer collects anonymous usage statistics relating to installation only. This is to better understand user behavior and to improve the user experience by identifying and addressing issues or optimizing popular features.

### How to opt-out

@@ -239,8 +247,8 @@ The registry key is as follows:

SOFTWARE\Docker Inc.\Docker Desktop\DisableMsiAnalytics
```

-When analytics is disabled, this key has a value of `1`.
+When analytics is disabled, this key is set to `1`.

## Additional resources

-- [Explore the FAQs](faq.md)
+- [Explore the FAQs](/manuals/enterprise/enterprise-deployment/faq.md)
diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md b/content/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md
similarity index 55%
rename from content/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md
rename to content/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md
index 1307cdb96ab9..d85894773558 100644
--- a/content/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md
+++ b/content/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md
@@ -1,14 +1,11 @@
---
-title: Use the PKG installer
+title: PKG installer
description: Understand how to use the PKG installer. Also explore additional configuration options.
keywords: pkg, mac, docker desktop, install, deploy, configure, admin, mdm
tags: [admin]
weight: 20
-params:
-  sidebar:
-    badge:
-      color: violet
-      text: EA
+aliases:
+ - /desktop/setup/install/enterprise-deployment/pkg-install-and-configure/
---

{{< summary-bar feature_name="PKG installer" >}}

@@ -17,30 +14,30 @@ The PKG package supports various MDM (Mobile Device Management) solutions, makin

## Install interactively

-1. In the [Docker Admin Console](http://admin.docker.com/), navigate to your organization.
-2. Under **Security and access**, select the **Deploy Docker Desktop** page.
-3. From the **macOS** tab, select the **Download PKG installer** button.
-4. Once downloaded, double-click `Docker.pkg` to run the installer.
-5. Follow the instructions on the installation wizard to authorize the installer and proceed with the install.
-   - **Introduction**: Select `Continue`.
-   - **License**: Review the license agreement and select `Agree`.
-   - **Destination Select**: This step is optional. It is recommended that you don't change the default installation destination (usually `Macintosh HD`). Select `Continue`.
-   - **Installation Type**: Select `Install`.
+1. In [Docker Home](https://app.docker.com), choose your organization.
+2. Select **Admin Console**, then **Enterprise deployment**.
+3. From the **macOS** tab, select the **Download PKG installer** button.
+4. Once downloaded, double-click `Docker.pkg` to run the installer.
+5. Follow the instructions on the installation wizard to authorize the installer and proceed with the installation.
+   - **Introduction**: Select **Continue**.
+   - **License**: Review the license agreement and select **Agree**.
+   - **Destination Select**: This step is optional. It is recommended that you keep the default installation destination (usually `Macintosh HD`). Select **Continue**.
+   - **Installation Type**: Select **Install**.
   - **Installation**: Authenticate using your administrator password or Touch ID.
-   - **Summary**: After the installation completes, select `Close`.
+   - **Summary**: When the installation completes, select **Close**.

> [!NOTE]
>
-> When installing Docker Desktop with the PKG, in-app updates are automatically disabled. This feature ensures your organization maintains the required Docker Desktop version. For Docker Desktop installed with the .dmg installer, in-app updates remain supported.
+> When installing Docker Desktop with the PKG, in-app updates are automatically disabled. This ensures organizations can maintain version consistency and prevent unapproved updates. For Docker Desktop installed with the `.dmg` installer, in-app updates remain supported.
>
-> Docker Desktop notifies you when an update is available. To update Docker Desktop, download the latest installer from the Docker Admin Console. Navigate to the **Deploy Docker Desktop** page > under **Security and access**.
+> Docker Desktop notifies you when an update is available. To update Docker Desktop, download the latest installer from the Docker Admin Console. Navigate to the **Enterprise deployment** page.
>
> To keep up to date with new releases, check the [release notes](/manuals/desktop/release-notes.md) page.

## Install from the command line

-1. In the [Docker Admin Console](http://admin.docker.com/), navigate to your organization.
-2. Under **Security and access**, select the **Deploy Docker Desktop** page.
+1. In [Docker Home](https://app.docker.com), choose your organization.
+2. Select **Admin Console**, then **Enterprise deployment**.
3. 
From the **macOS** tab, select the **Download PKG installer** button.
4. From your terminal, run the following command:

@@ -50,5 +47,5 @@ The PKG package supports various MDM (Mobile Device Management) solutions, makin

## Additional resources

-- See how you can deploy Docker Desktop for Mac via [Intune](use-intune.md) or [Jamf Pro](use-jamf-pro.md)
-- Explore how to [Enforce sign-in](/manuals/security/for-admins/enforce-sign-in/methods.md#plist-method-mac-only) for your users. \ No newline at end of file
+- See how you can deploy Docker Desktop for Mac using [Intune](use-intune.md) or [Jamf Pro](use-jamf-pro.md).
+- Explore how to [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/methods.md#plist-method-mac-only) for your users. \ No newline at end of file
diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/use-intune.md b/content/manuals/enterprise/enterprise-deployment/use-intune.md
similarity index 70%
rename from content/manuals/desktop/setup/install/enterprise-deployment/use-intune.md
rename to content/manuals/enterprise/enterprise-deployment/use-intune.md
index 721b60359d5d..9e248a1f187e 100644
--- a/content/manuals/desktop/setup/install/enterprise-deployment/use-intune.md
+++ b/content/manuals/enterprise/enterprise-deployment/use-intune.md
@@ -1,17 +1,18 @@
---
-title: Use Intune
+title: Deploy with Intune
description: Use Intune, Microsoft's cloud-based device management tool, to deploy Docker Desktop
keywords: microsoft, windows, docker desktop, deploy, mdm, enterprise, administrator, mac, pkg, dmg
tags: [admin]
-weight: 30
+weight: 40
aliases:
-- /desktop/install/msi/use-intune/
-- /desktop/setup/install/msi/use-intune/
+ - /desktop/install/msi/use-intune/
+ - /desktop/setup/install/msi/use-intune/
+ - /desktop/setup/install/enterprise-deployment/use-intune/
---

{{< summary-bar feature_name="Intune" >}}

-Learn how to deploy Docker Desktop for Windows and Mac using Intune, Microsoft's cloud-based device management tool.
+Learn how to deploy Docker Desktop on Windows and macOS devices using Microsoft Intune. This guide covers app creation, installer configuration, and assignment to users or devices.

{{< tabs >}}
{{< tab name="Windows" >}}

@@ -20,7 +21,7 @@ Learn how to deploy Docker Desktop for Windows and Mac using Intune, Microsoft's
2. Add a new app. Select **Apps**, then **Windows**, then **Add**.
3. For the app type, select **Windows app (Win32)**
4. Select the `intunewin` package.
-5. Complete any relevant details such as the description, publisher, or app version and then select **Next**.
+5. Fill in the required details, such as the description, publisher, and app version, and then select **Next**.
6. Optional: On the **Program** tab, you can update the **Install command** field to suit your needs. The field is pre-populated with `msiexec /i "DockerDesktop.msi" /qn`. See the [Common installation scenarios](msi-install-and-configure.md) for examples on the changes you can make.

> [!TIP]
>
@@ -29,9 +30,9 @@ Learn how to deploy Docker Desktop for Windows and Mac using Intune, Microsoft's
>
> This is because the Docker Desktop installer installs Windows features depending on your engine selection and also updates the membership of the `docker-users` local group.
>
- > You may also want to set Intune to determine behaviour based on return codes and watch for a return code of `3010`.
+ > You might also want to set Intune to determine behavior based on return codes and watch for a return code of `3010`. 
Return code 3010 means the installation succeeded but a reboot is required. -7. Complete the rest of the tabs and then review and create the app. +7. Complete the remaining tabs, then review and create the app. {{< /tab >}} {{< tab name="Mac" >}} @@ -39,7 +40,7 @@ Learn how to deploy Docker Desktop for Windows and Mac using Intune, Microsoft's First, upload the package: 1. Sign in to your Intune admin center. -2. Add a new app. Select **Apps**, then **macOSs**, then **Add**. +2. Add a new app. Select **Apps**, then **macOS**, then **Add**. 3. Select **Line-of-business app** and then **Select**. 4. Upload the `Docker.pkg` file and fill in the required details. @@ -55,4 +56,4 @@ Next, assign the app: ## Additional resources - [Explore the FAQs](faq.md). -- Learn how to [Enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) for your users. \ No newline at end of file +- Learn how to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) for your users. \ No newline at end of file diff --git a/content/manuals/desktop/setup/install/enterprise-deployment/use-jamf-pro.md b/content/manuals/enterprise/enterprise-deployment/use-jamf-pro.md similarity index 54% rename from content/manuals/desktop/setup/install/enterprise-deployment/use-jamf-pro.md rename to content/manuals/enterprise/enterprise-deployment/use-jamf-pro.md index e2b2b98fff76..637720799429 100644 --- a/content/manuals/desktop/setup/install/enterprise-deployment/use-jamf-pro.md +++ b/content/manuals/enterprise/enterprise-deployment/use-jamf-pro.md @@ -1,30 +1,34 @@ --- -title: Use Jamf Pro -description: Use Jamf Pro to deploy Docker Desktop +title: Deploy with Jamf Pro +description: Use Jamf Pro to deploy Docker Desktop for Mac keywords: jamf, mac, docker desktop, deploy, mdm, enterprise, administrator, pkg tags: [admin] -weight: 40 +weight: 50 +aliases: + - /desktop/setup/install/enterprise-deployment/use-jamf-pro/ --- -Learn how to deploy Docker Desktop for Mac using Jamf Pro. +{{< summary-bar feature_name="Jamf Pro" >}} + +Learn how to deploy Docker Desktop for Mac using Jamf Pro, including uploading the installer and creating a deployment policy. First, upload the package: -1. From the Jamf pro console, Navigate to **Computers** > **Management Settings** > **Computer Management** > **Packages**. +1. From the Jamf Pro console, navigate to **Computers** > **Management Settings** > **Computer Management** > **Packages**. 2. Select **New** to add a new package. 3. Upload the `Docker.pkg` file. Next, create a policy for deployment: 1. Navigate to **Computers** > **Policies**. -2. Select **New**to create a new policy. +2. Select **New** to create a new policy. 3. Enter a name for the policy, for example "Deploy Docker Desktop". 4. Under the **Packages** tab, add the Docker package you uploaded. -5. Configure the scope to target the devices or device groups you want to install Docker on. +5. Configure the scope to target the devices or device groups on which you want to install Docker. 6. Save the policy and deploy. For more information, see [Jamf Pro's official documentation](https://learn.jamf.com/en-US/bundle/jamf-pro-documentation-current/page/Policies.html). ## Additional resources -- Learn how to [Enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) for your users. \ No newline at end of file +- Learn how to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) for your users. 
\ No newline at end of file
diff --git a/content/manuals/enterprise/security/_index.md b/content/manuals/enterprise/security/_index.md
new file mode 100644
index 000000000000..800adb38e106
--- /dev/null
+++ b/content/manuals/enterprise/security/_index.md
@@ -0,0 +1,74 @@
+---
+linkTitle: Security
+title: Security for enterprises
+description: Learn about enterprise-level security features Docker has to offer and explore best practices
+keywords: docker, docker hub, docker desktop, security, enterprises, scale
+weight: 10
+params:
+ sidebar:
+ group: Enterprise
+grid_admins:
+- title: Settings Management
+ description: Learn how Settings Management can secure your developers' workflows.
+ icon: shield_locked
+ link: /enterprise/security/hardened-desktop/settings-management/
+- title: Enhanced Container Isolation
+ description: Understand how Enhanced Container Isolation can prevent container attacks.
+ icon: security
+ link: /enterprise/security/hardened-desktop/enhanced-container-isolation/
+- title: Registry Access Management
+ description: Control the registries developers can access while using Docker Desktop.
+ icon: home_storage
+ link: /enterprise/security/hardened-desktop/registry-access-management/
+- title: Image Access Management
+ description: Control the images developers can pull from Docker Hub.
+ icon: photo_library
+ link: /enterprise/security/hardened-desktop/image-access-management/
+- title: "Air-Gapped Containers"
+ description: Restrict containers from accessing unwanted network resources.
+ icon: "vpn_lock"
+ link: /enterprise/security/hardened-desktop/air-gapped-containers/
+- title: Enforce sign-in
+ description: Configure sign-in for members of your teams and organizations.
+ link: /enterprise/security/enforce-sign-in/
+ icon: passkey
+- title: Domain management
+ description: Identify uncaptured users in your organization.
+ link: /enterprise/security/domain-management/
+ icon: person_search
+- title: Docker Scout
+ description: Explore how Docker Scout can help you create a more secure software supply chain.
+ icon: query_stats
+ link: /scout/
+- title: SSO
+ description: Learn how to configure SSO for your company or organization.
+ icon: key
+ link: /enterprise/security/single-sign-on/
+- title: SCIM
+ description: Set up SCIM to automatically provision and deprovision users.
+ icon: checklist
+ link: /enterprise/security/provisioning/scim/
+- title: Roles and permissions
+ description: Assign roles to individuals giving them different permissions within an organization.
+ icon: badge
+ link: /enterprise/security/roles-and-permissions/
+- title: Private marketplace for Extensions (Beta)
+ description: Learn how to configure and set up a private marketplace with a curated list of extensions for your Docker Desktop users.
+ icon: storefront
+ link: /desktop/extensions/private-marketplace/
+- title: Organization access tokens
+ description: Create organization access tokens as an alternative to a password.
+ link: /enterprise/security/access-tokens/
+ icon: password
+---
+
+Docker provides security guardrails for both administrators and developers.
+
+If you're an administrator, you can enforce sign-in across Docker products for your developers, and
+scale, manage, and secure your instances of Docker Desktop with DevOps security controls like Enhanced Container Isolation and Registry Access Management.
+
+## For administrators
+
+Explore the security features Docker offers to satisfy your company's security policies.
+
+{{< grid items="grid_admins" >}}
\ No newline at end of file
diff --git a/content/manuals/enterprise/security/access-tokens.md b/content/manuals/enterprise/security/access-tokens.md
new file mode 100644
index 000000000000..ce84a1893e61
--- /dev/null
+++ b/content/manuals/enterprise/security/access-tokens.md
@@ -0,0 +1,121 @@
+---
+title: Organization access tokens
+description: Learn how to create and manage organization access tokens
+ to securely push and pull images programmatically.
+keywords: docker hub, security, OAT, organization access token
+linkTitle: Organization access tokens
+aliases:
+ - /security/for-admins/access-tokens/
+---
+
+{{< summary-bar feature_name="OATs" >}}
+
+> [!WARNING]
+>
+> Organization access tokens (OATs) are incompatible with Docker Desktop,
+> [Image Access Management (IAM)](/manuals/enterprise/security/hardened-desktop/image-access-management.md), and [Registry Access Management (RAM)](/manuals/enterprise/security/hardened-desktop/registry-access-management.md).
+>
+> If you use Docker Desktop, IAM, or RAM, you must use personal
+> access tokens instead.
+
+An organization access token (OAT) is like a [personal access token
+(PAT)](/security/access-tokens/), but an OAT is associated with
+an organization and not a single user account. Use an OAT instead of a PAT to
+let business-critical tasks access Docker Hub repositories without connecting
+the token to a single user. You must have a [Docker Team or Business
+subscription](/subscription/core-subscription/details/) to use OATs.
+
+OATs provide the following advantages:
+
+- You can investigate when the OAT was last used and then disable or delete it
+ if you find any suspicious activity.
+- You can limit what each OAT has access to, which limits the impact if an OAT
+ is compromised.
+- All company or organization owners can manage OATs. If one owner leaves the
+ organization, the remaining owners can still manage the OATs.
+- OATs have their own Docker Hub usage limits that don't count towards your
+ personal account's limits.
+
+If you have existing [service accounts](/docker-hub/service-accounts/),
+Docker recommends that you replace the service accounts with OATs. OATs offer
+the following advantages over service accounts:
+
+- Access permissions are easier to manage with OATs. You can assign access
+ permissions to OATs, while service accounts require using teams for access
+ permissions.
+- OATs are easier to manage. OATs are centrally managed in the Admin Console.
+ For service accounts, you might need to sign in to that service account to
+ manage it. If using single sign-on enforcement and the service account is not
+ in your IdP, you might not be able to sign in to the service account to manage
+ it.
+- OATs are not associated with a single user. If a user with access to the
+ service account leaves your organization, you might lose access to the service
+ account. OATs can be managed by any company or organization owner.
+
+## Create an organization access token
+
+> [!IMPORTANT]
+>
+> Treat access tokens like a password and keep them secret. Store your tokens
+> securely in a credential manager, for example.
+
+Company or organization owners can create up to:
+- 10 OATs for organizations with a Team subscription
+- 100 OATs for organizations with a Business subscription
+
+Expired tokens count towards the total amount of tokens.
+
+To create an OAT:
+
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Admin Console**, then **Access tokens**.
+1. 
Select **Generate access token**.
+1. Add a label and optional description for your token. Use something that
+indicates the use case or purpose of the token.
+1. Select the expiration date for the token.
+1. Expand the **Repository** drop-down to set access permission
+scopes for your token. To set Repository access scopes:
+ 1. Optional. Select **Read public repositories**.
+ 1. Select **Add repository** and choose a repository from the drop-down.
+ 1. Set the scopes for your repository: **Image Push** or
+ **Image Pull**.
+ 1. Add more repositories as needed. You can add up to 50 repositories.
+1. Optional. Expand the **Organization** drop-down and select the
+**Allow management access to this organization's resources** checkbox. This
+setting enables organization management scopes for your token. The following
+organization management scopes are available:
+ - **Member Edit**: Edit members of the organization
+ - **Member Read**: Read members of the organization
+ - **Invite Edit**: Invite members to the organization
+ - **Invite Read**: Read invites to the organization
+ - **Group Edit**: Edit groups of the organization
+ - **Group Read**: Read groups of the organization
+1. Select **Generate token**. Copy the token that appears on the screen
+ and save it. You won't be able to retrieve the token once you exit the
+ screen.
+
+## Use an organization access token
+
+You can use an organization access token when you sign in using the Docker CLI.
+
+Sign in from your Docker CLI client with the following command, replacing
+`YOUR_ORG` with your organization name:
+
+```console
+$ docker login --username <YOUR_ORG>
+```
+
+When prompted for a password, enter your organization access token instead of a
+password.
+
+## Modify existing tokens
+
+You can rename, update the description, update the repository access,
+deactivate, or delete a token as needed.
+
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Admin Console**, then **Access tokens**.
+1. Select the actions menu in the token row, then select **Deactivate**, **Edit**, or **Delete** to modify the token. For **Inactive** tokens, you can only select **Delete**.
+1. If editing a token, select **Save** after specifying your modifications.
diff --git a/content/manuals/enterprise/security/domain-management.md b/content/manuals/enterprise/security/domain-management.md
new file mode 100644
index 000000000000..0d3d0ef17c45
--- /dev/null
+++ b/content/manuals/enterprise/security/domain-management.md
@@ -0,0 +1,204 @@
+---
+description: Learn how to manage domains and users in the Admin Console
+keywords: domain management, security, identify users, manage users
+title: Domain management
+weight: 55
+aliases:
+ - /security/for-admins/domain-management/
+---
+
+{{< summary-bar feature_name="Domain management" >}}
+
+Domain management lets you add and verify domains, and enable
+auto-provisioning for users. Auto-provisioning adds users to your
+organization when they sign in with an email address that matches a verified
+domain.
+
+This simplifies user management, ensures consistent security settings, and
+reduces the risk of unmanaged users accessing Docker without visibility
+or control.
+
+## Add a domain
+
+1. Sign in to [Docker Home](https://app.docker.com) and select
+your organization. If your organization is part of a company, select the company
+and configure the domain for the organization at the company level.
+1. Select **Admin Console**, then **Domain management**.
+1. Select **Add a domain**.
+1. 
Enter your domain and select **Add domain**.
+1. In the pop-up modal, copy the **TXT Record Value** to verify your domain.
+
+## Verify a domain
+
+Verifying your domain confirms that you own it. To verify, add a TXT record to
+your Domain Name System (DNS) host using the value provided by Docker. This
+value proves ownership and instructs your DNS to publish the record.
+
+It can take up to 72 hours for the DNS change to propagate. Docker automatically
+checks for the record and confirms ownership once the change is recognized.
+
+Follow your DNS provider’s documentation to add the **TXT Record Value**. If
+your provider isn't listed, use the steps for other providers.
+
+> [!TIP]
+>
+> The record name field determines where the TXT record is added in your domain
+(root or subdomain). In general, refer to the following tips for
+adding a record name:
+>
+> - Use `@` or leave the record name empty for root domains like `example.com`,
+depending on your provider.
+> - Don't enter values like `docker`, `docker-verification`, `www`, or your
+domain name. These values might direct to the wrong place.
+>
+> Check your DNS provider's documentation to verify record name requirements.

+{{< tabs >}}
+{{< tab name="AWS Route 53" >}}
+
+1. To add your TXT record to AWS, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html).
+1. TXT record verification can take 72 hours. Once you have waited for
+TXT record verification, return to the **Domain management** page of the
+[Admin Console](https://app.docker.com/admin) and select **Verify** next to
+your domain name.
+
+{{< /tab >}}
+{{< tab name="Google Cloud DNS" >}}
+
+1. To add your TXT record to Google Cloud DNS, see [Verifying your domain with a TXT record](https://cloud.google.com/identity/docs/verify-domain-txt).
+1. TXT record verification can take 72 hours. Once you have waited for TXT
+record verification, return to the **Domain management** page of the
+[Admin Console](https://app.docker.com/admin) and select **Verify** next to
+your domain name.
+
+{{< /tab >}}
+{{< tab name="GoDaddy" >}}
+
+1. To add your TXT record to GoDaddy, see [Add a TXT record](https://www.godaddy.com/help/add-a-txt-record-19232).
+1. TXT record verification can take 72 hours. Once you have waited for TXT
+record verification, return to the **Domain management** page of the
+[Admin Console](https://app.docker.com/admin) and select **Verify** next to your
+domain name.
+
+{{< /tab >}}
+{{< tab name="Other providers" >}}
+
+1. Sign in to your domain host.
+1. Add a TXT record to your DNS settings and save the record.
+1. TXT record verification can take 72 hours. Once you have waited for TXT
+record verification, return to the **Domain management** page of the
+[Admin Console](https://app.docker.com/admin) and select **Verify** next to
+your domain name.
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Delete a domain
+
+Deleting a domain removes the assigned TXT record value. To delete a domain:
+
+1. Sign in to [Docker Home](https://app.docker.com) and select
+your organization. If your organization is part of a company, select the company
+and configure the domain for the organization at the company level.
+1. Select **Admin Console**, then **Domain management**.
+1. For the domain you want to delete, select the **Actions** menu, then
+**Delete domain**.
+1. To confirm, select **Delete domain** in the pop-up modal.
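+
+Whether you're waiting on verification or confirming cleanup after deleting a domain, you can inspect the TXT records your DNS currently publishes from a terminal. A quick sketch using `dig`, where `example.com` stands in for your domain and the record value shown is hypothetical:
+
+```console
+$ dig example.com TXT +short
+"docker-verification=7e5c8d2f"
+```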
+
+## Audit domains
+
+{{< summary-bar feature_name="Domain audit" >}}
+
+The domain audit feature identifies uncaptured users in an organization.
+Uncaptured users are Docker users who have authenticated to Docker
+using an email address associated with one of your verified domains,
+but they're not a member of your Docker organization.
+
+### Known limitations
+
+Domain audit can't identify the following Docker users:
+
+- Users who access Docker Desktop without authenticating.
+- Users who authenticate using an account that doesn't have an
+email address associated with one of your verified domains.
+
+Although domain audit can't identify all Docker users,
+you can enforce sign-in to prevent unidentifiable users from accessing
+Docker Desktop in your environment. For more information,
+see [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md).
+
+### Audit your domain for uncaptured users
+
+1. Sign in to [Docker Home](https://app.docker.com) and choose your
+company.
+1. Select **Admin Console**, then **Domain management**.
+1. In **Domain audit**, select **Export Users** to export a CSV file
+of uncaptured users.
+
+The CSV file contains the following columns:
+
+ - Name: Name of the Docker user
+ - Username: Docker ID of the Docker user
+ - Email: Email address of the Docker user
+
+### Invite uncaptured users
+
+You can invite all uncaptured users to your organization using the exported
+CSV file. For more information on bulk inviting users, see
+[Manage organization members](/manuals/admin/organization/members.md).
+
+## Auto-provisioning
+
+You must add and verify a domain before enabling auto-provisioning. This
+confirms your organization owns the domain. Once a domain is verified,
+Docker can automatically associate matching users with your organization.
+Auto-provisioning does not require an SSO connection.
+
+> [!IMPORTANT]
+>
+> For domains that are part of an SSO connection, Just-in-Time (JIT) overrides
+auto-provisioning to add users to an organization.
+
+### How it works
+
+When auto-provisioning is enabled for a verified domain, the next time a user
+signs in to Docker with an email address that is associated with your verified
+domain, they are automatically added to your organization. Auto-provisioning
+does not create accounts for new users. It adds existing unassociated users to
+your organization. Users do *not* experience any sign-in or user experience
+changes.
+
+When a new user is auto-provisioned, company and organization owners
+receive an email notifying them that a new user has been added to their
+organization. If you need to add more seats to your organization
+to accommodate new users, see [Manage seats](/manuals/subscription/manage-seats.md).
+
+### Enable auto-provisioning
+
+Auto-provisioning is enabled per user. To enable
+auto-provisioning:
+
+1. Sign in to [Docker Home](https://app.docker.com) and select
+your organization. If your organization is part of a company, select the company
+and configure the domain for the organization at the company level.
+1. Select **Admin Console**, then **Domain management**.
+1. Select the **Actions** menu next to the user you want to enable
+auto-provisioning for.
+1. Select **Enable auto-provisioning**.
+1. Optional. If enabling auto-provisioning at the company level, select an
+organization for the user.
+1. Select **Enable** to confirm.
+
+The **Auto-provisioning** column updates to **Enabled**.
+
+### Disable auto-provisioning
+
+To disable auto-provisioning for a user:
+
+1. 
Sign in to [Docker Home](https://app.docker.com) and select
+your organization. If your organization is part of a company, select the company
+and configure the domain for the organization at the company level.
+1. Select **Admin Console**, then **Domain management**.
+1. Select the **Actions** menu next to your user.
+1. Select **Disable auto-provisioning**.
+1. Select **Disable**.
diff --git a/content/manuals/security/for-admins/enforce-sign-in/_index.md b/content/manuals/enterprise/security/enforce-sign-in/_index.md
similarity index 82%
rename from content/manuals/security/for-admins/enforce-sign-in/_index.md
rename to content/manuals/enterprise/security/enforce-sign-in/_index.md
index 44f6e32994ba..c0cc6e5088d9 100644
--- a/content/manuals/security/for-admins/enforce-sign-in/_index.md
+++ b/content/manuals/enterprise/security/enforce-sign-in/_index.md
@@ -8,6 +8,7 @@ tags: [admin]
aliases:
- /security/for-admins/configure-sign-in/
- /docker-hub/configure-sign-in/
+ - /security/for-admins/enforce-sign-in/
weight: 30
---

@@ -16,10 +17,13 @@ weight: 30

By default, members of your organization can use Docker Desktop without signing
in. When users don’t sign in as a member of your organization, they don’t
receive the [benefits of your organization’s
-subscription](../../../subscription/details.md) and they can circumvent [Docker’s
-security features](/manuals/security/for-admins/hardened-desktop/_index.md) for your organization.
+subscription](/manuals/subscription/details.md) and they can circumvent
+[Docker’s
+security features](/manuals/enterprise/security/hardened-desktop/_index.md) for
+your organization.

-There are multiple methods for enforcing sign-in, depending on your companies' set up and preferences:
+There are multiple methods for enforcing sign-in, depending on your company's
+setup and preferences:
- [Registry key method (Windows only)](methods.md#registry-key-method-windows-only){{< badge color=green text="New" >}}
- [Configuration profiles method (Mac only)](methods.md#configuration-profiles-method-mac-only){{< badge color=green text="New" >}}
- [`.plist` method (Mac only)](methods.md#plist-method-mac-only){{< badge color=green text="New" >}}
@@ -27,8 +31,8 @@ There are multiple methods for enforcing sign-in, depending on your companies' s

## How is sign-in enforced?

-When Docker Desktop starts and it detects a registry key, `.plist` file, or `registry.json` file, the
-following occurs:
+When Docker Desktop starts and it detects a registry key, `.plist` file, or
+`registry.json` file, the following occurs:
- A **Sign in required!** prompt appears requiring the user to sign in as a
member of your organization to use Docker Desktop. ![Enforce Sign-in
@@ -41,10 +45,15 @@ following occurs:
- When a user signs out, the **Sign in required!** prompt appears and they can
no longer use Docker Desktop.

+> [!NOTE]
+>
+> Enforcing sign-in for Docker Desktop does not impact accessing the Docker CLI.
+> CLI access is only impacted for organizations that enforce single sign-on.
+
## Enforcing sign-in versus enforcing single sign-on (SSO)

-[Enforcing SSO](/manuals/security/for-admins/single-sign-on/connect.md) and
-enforcing sign-in are different features. The following table provides a
+[Enforcing SSO](/manuals/enterprise/security/single-sign-on/connect.md#optional-enforce-sso)
+and enforcing sign-in are different features. The following table provides a
description and benefits when using each feature.
| Enforcement | Description | Benefits |
@@ -56,5 +65,5 @@ description and benefits when using each feature.

## What's next?

-- To enforce sign-in, review the [Methods](/manuals/security/for-admins/enforce-sign-in/methods.md) guide.
-- To enforce SSO, review the [Enforce SSO](/manuals/security/for-admins/single-sign-on/connect.md) steps. \ No newline at end of file
+- To enforce sign-in, review the [Methods](/manuals/enterprise/security/enforce-sign-in/methods.md) guide.
+- To enforce SSO, review the [Enforce SSO](/manuals/enterprise/security/single-sign-on/connect.md) steps. \ No newline at end of file
diff --git a/content/manuals/security/for-admins/enforce-sign-in/methods.md b/content/manuals/enterprise/security/enforce-sign-in/methods.md
similarity index 97%
rename from content/manuals/security/for-admins/enforce-sign-in/methods.md
rename to content/manuals/enterprise/security/enforce-sign-in/methods.md
index 863af9571ccb..6b4052140000 100644
--- a/content/manuals/security/for-admins/enforce-sign-in/methods.md
+++ b/content/manuals/enterprise/security/enforce-sign-in/methods.md
@@ -4,6 +4,8 @@ keywords: authentication, registry.json, configure, enforce sign-in, docker desk
title: Ways to enforce sign-in for Docker Desktop
tags: [admin]
linkTitle: Methods
+aliases:
+ - /security/for-admins/enforce-sign-in/methods/
---

{{< summary-bar feature_name="Enforce sign-in" >}}

@@ -27,7 +29,7 @@ To enforce sign-in for Docker Desktop on Windows, you can configure a registry k
> [!IMPORTANT]
>
> As of Docker Desktop version 4.36 and later, you can add more than one organization. With Docker Desktop version 4.35 and earlier, if you add more than one organization sign-in enforcement silently fails.
-3. Use your organization's name, all lowercase as string data.
+3. Use your organization's name, all lowercase, as string data. If you're adding more than one organization, make sure each is on its own line. Don't use any other separators, such as spaces or commas.
4. Restart Docker Desktop.
5. When Docker Desktop restarts, verify that the **Sign in required!** prompt appears.
@@ -53,10 +55,7 @@ The following example outlines how to deploy a registry key to enforce sign-in o

## Configuration profiles method (Mac only)

-> [!NOTE]
->
-> The configuration profiles method is in [Early Access](/manuals/release-lifecycle.md)
-> and is available with Docker Desktop version 4.36 and later.
+{{< summary-bar feature_name="Config profiles" >}}

Configuration profiles are a feature of macOS that let you distribute
configuration information to the Macs you manage. It is the safest method to
@@ -124,6 +123,8 @@ tampered with by the users.

4. Use a MDM solution to distribute your modified `.mobileconfig` file to your
macOS clients.
+ ## plist method (Mac only) > [!NOTE] diff --git a/content/manuals/security/for-admins/hardened-desktop/_index.md b/content/manuals/enterprise/security/hardened-desktop/_index.md similarity index 90% rename from content/manuals/security/for-admins/hardened-desktop/_index.md rename to content/manuals/enterprise/security/hardened-desktop/_index.md index bdbbe71c02d9..9da8d7fd5cc0 100644 --- a/content/manuals/security/for-admins/hardened-desktop/_index.md +++ b/content/manuals/enterprise/security/hardened-desktop/_index.md @@ -8,27 +8,28 @@ keywords: security, hardened desktop, enhanced container isolation, registry acc tags: [admin] aliases: - /desktop/hardened-desktop/ + - /security/for-admins/hardened-desktop/ grid: - title: "Settings Management" description: Learn how Settings Management can secure your developers' workflows. icon: shield_locked - link: /security/for-admins/hardened-desktop/settings-management/ + link: /enterprise/security/hardened-desktop/settings-management/ - title: "Enhanced Container Isolation" description: Understand how Enhanced Container Isolation can prevent container attacks. icon: "security" - link: /security/for-admins/hardened-desktop/enhanced-container-isolation/ + link: /enterprise/security/hardened-desktop/enhanced-container-isolation/ - title: "Registry Access Management" description: Control the registries developers can access while using Docker Desktop. icon: "home_storage" - link: /security/for-admins/hardened-desktop/registry-access-management/ + link: /enterprise/security/hardened-desktop/registry-access-management/ - title: "Image Access Management" description: Control the images developers can pull from Docker Hub. icon: "photo_library" - link: /security/for-admins/hardened-desktop/image-access-management/ + link: /enterprise/security/hardened-desktop/image-access-management/ - title: "Air-Gapped Containers" description: Restrict containers from accessing unwanted network resources. icon: "vpn_lock" - link: /security/for-admins/hardened-desktop/air-gapped-containers/ + link: /enterprise/security/hardened-desktop/air-gapped-containers/ weight: 60 --- diff --git a/content/manuals/security/for-admins/hardened-desktop/air-gapped-containers.md b/content/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md similarity index 94% rename from content/manuals/security/for-admins/hardened-desktop/air-gapped-containers.md rename to content/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md index 49e0ea8a93fc..54d640486f6a 100644 --- a/content/manuals/security/for-admins/hardened-desktop/air-gapped-containers.md +++ b/content/manuals/enterprise/security/hardened-desktop/air-gapped-containers.md @@ -5,6 +5,7 @@ keywords: air gapped, security, Docker Desktop, configuration, proxy, network aliases: - /desktop/hardened-desktop/settings-management/air-gapped-containers/ - /desktop/hardened-desktop/air-gapped-containers/ + - /security/for-admins/hardened-desktop/air-gapped-containers/ --- {{< summary-bar feature_name="Air-gapped containers" >}} @@ -24,7 +25,7 @@ You can choose: ## Configuration -Assuming [enforced sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) and [Settings Management](settings-management/_index.md) are enabled, add the new proxy configuration to the `admin-settings.json` file. 
For example: +Assuming [enforced sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) and [Settings Management](settings-management/_index.md) are enabled, add the new proxy configuration to the `admin-settings.json` file. For example: ```json { @@ -34,7 +35,7 @@ Assuming [enforced sign-in](/manuals/security/for-admins/enforce-sign-in/_index. "mode": "manual", "http": "", "https": "", - "exclude": "", + "exclude": [], "pac": "http://192.168.1.16:62039/proxy.pac", "transparentPorts": "*" } @@ -52,6 +53,7 @@ The `containersProxy` setting describes the policy which is applied to traffic f > [!IMPORTANT] > > Any existing `proxy` setting in the `admin-settings.json` file continues to apply to traffic from the app on the host. +> If the PAC file download fails, the containers block the request to the target URL. ## Example PAC file diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md similarity index 91% rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md index 3f6453610b83..13402bab95d6 100644 --- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md +++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md @@ -6,6 +6,7 @@ title: What is Enhanced Container Isolation? linkTitle: Enhanced Container Isolation aliases: - /desktop/hardened-desktop/enhanced-container-isolation/ + - /security/for-admins/hardened-desktop/enhanced-container-isolation/ weight: 20 --- @@ -15,7 +16,7 @@ Enhanced Container Isolation (ECI) provides an additional layer of security to p It uses a variety of advanced techniques to harden container isolation, but without impacting developer productivity. -Enhanced Container Isolation ensures stronger container isolation and also locks in any security configurations that have been created by administrators, for instance through [Registry Access Management policies](/manuals/security/for-admins/hardened-desktop/registry-access-management.md) or with [Settings Management](../settings-management/_index.md). +Enhanced Container Isolation ensures stronger container isolation and also locks in any security configurations that have been created by administrators, for instance through [Registry Access Management policies](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) or with [Settings Management](../settings-management/_index.md). > [!NOTE] > @@ -51,9 +52,9 @@ For more information on how Enhanced Container Isolation work, see [How does it > [!IMPORTANT] > -> Enhanced Container Isolation does not yet fully protect Docker builds, -> Kubernetes pods and Extension containers. For more information on known -> limitations and workarounds, see [FAQs](faq.md). +> ECI protection for Docker builds and [Kubernetes in Docker Desktop](/manuals/desktop/features/kubernetes.md) varies according to the +> Docker Desktop version. Later versions include more protection than earlier versions. Also, ECI does not yet +> protect extension containers. For more information on known limitations and workarounds, see [FAQs](faq.md). ## How do I enable Enhanced Container Isolation? 
@@ -75,11 +76,11 @@ To enable Enhanced Container Isolation as a developer:

#### Prerequisite

-You first need to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization. Since Settings Management requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users, even though it may still work without enforced sign-in.
+You first need to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization. Since Settings Management requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users, even though it might still work without enforced sign-in.

#### Setup

-[Create and configure the `admin-settings.json` file](/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md) and specify:
+[Create and configure the `admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) and specify:

```json
{
@@ -112,7 +113,7 @@ For this to take effect:

> [!TIP]
>
-> You can now also configure these settings in the [Docker Admin Console](/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md).
+> You can now also configure these settings in the [Docker Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md).

When Enhanced Container Isolation is enabled, users see:
- **Use Enhanced Container Isolation** toggled on in **Settings** > **General**.
diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md
similarity index 98%
rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md
rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md
index 146b7362681a..a555f45cb3be 100644
--- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md
+++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md
@@ -5,6 +5,7 @@ linkTitle: Advanced configuration
keywords: enhanced container isolation, Docker Desktop, Docker socket, bind mount, configuration
aliases:
- /desktop/hardened-desktop/enhanced-container-isolation/config/
+ - /security/for-admins/hardened-desktop/enhanced-container-isolation/config/
weight: 30
---

@@ -63,7 +64,7 @@ This can be done via the Docker Socket mount permissions section in the

> [!TIP]
>
-> You can now also configure these settings in the [Docker Admin Console](/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md).
+> You can now also configure these settings in the [Docker Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md).

As shown above, there are two configurations for bind-mounting the Docker
socket into containers: the `imageList` and the `commandList`. These are
@@ -138,7 +139,7 @@ ones in the repository.
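+For reference, a sketch of how the `imageList` and `commandList` can sit together under the `dockerSocketMount` setting in `admin-settings.json`. The image names are illustrative and the exact nesting might differ between Docker Desktop versions, so confirm it against the Settings Management reference:
+
+```json
+{
+  "configurationFileVersion": 2,
+  "enhancedContainerIsolation": {
+    "locked": true,
+    "value": true,
+    "dockerSocketMount": {
+      "imageList": {
+        "images": [
+          "docker.io/localstack/localstack:*",
+          "docker.io/testcontainers/ryuk:*"
+        ]
+      },
+      "commandList": {
+        "type": "deny",
+        "commands": ["push"]
+      }
+    }
+  }
+}
+```
+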
### Docker Socket Mount Permissions for derived images -{{< introduced desktop 4.34.0 "../../../../desktop/release-notes.md#4340" >}} +{{< summary-bar feature_name="Docker Socket Mount Permissions" >}} As described in the prior section, administrators can configure the list of container images that are allowed to mount the Docker socket via the `imageList`. diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/faq.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/faq.md similarity index 75% rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/faq.md rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/faq.md index a2f7048eee31..bdc77e4ef99e 100644 --- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/faq.md +++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/faq.md @@ -6,6 +6,7 @@ keywords: enhanced container isolation, security, faq, sysbox, Docker Desktop toc_max: 2 aliases: - /desktop/hardened-desktop/enhanced-container-isolation/faq/ + - /security/for-admins/hardened-desktop/enhanced-container-isolation/faq/ weight: 40 --- @@ -69,22 +70,39 @@ See [ECI Docker socket mount permissions](config.md#docker-socket-mount-permissi Not yet. It protects all containers launched by users via `docker create` and `docker run`. -Prior to Docker Desktop 4.30, it did not protect containers implicitly used by -`docker build` with the `docker` build driver (the default driver). Starting -with Docker Desktop 4.30, it protects such containers, except for Docker Desktop -on WSL 2 (Windows hosts). +For containers implicitly created by `docker build` as well as Docker +Desktop's integrated Kubernetes, protection varies depending on the Docker +Desktop version (see the following two FAQs). -Note that ECI always protects containers used by `docker build`, when using the -[docker-container build driver](/manuals/build/builders/drivers/_index.md), since Docker -Desktop 4.19 and on all supported platforms (Windows with WSL 2 or Hyper-V, Mac, -and Linux). +### Does ECI protect containers implicitly used by `docker build`? -ECI does not yet protect Docker Desktop Kubernetes pods, Extension containers, -and [Dev Environments containers](/manuals/desktop/features/dev-environments/_index.md). +Prior to Docker Desktop 4.19, ECI did not protect containers used implicitly +by `docker build` during the build process. + +Since Docker Desktop 4.19, ECI protects containers used by `docker build` +when using the [Docker container driver](/manuals/build/builders/drivers/_index.md). + +In addition, since Docker Desktop 4.30, ECI also protects containers used by +`docker build` when using the default "docker" build driver, on all +platforms supported by Docker Desktop except Windows with WSL 2. + +### Does ECI protect Kubernetes in Docker Desktop? + +Prior to Docker Desktop 4.38, ECI did not protect the Kubernetes cluster +integrated in Docker Desktop. + +Since Docker Desktop 4.38, ECI protects the integrated Kubernetes cluster +when using the new **kind** provisioner (see [Deploy on Kubernetes](/manuals/desktop/features/kubernetes.md)). +In this case, each node in the multi-node Kubernetes cluster is actually an +ECI-protected container. With ECI disabled, each node in the Kubernetes cluster is +a less-secure fully privileged container.
+ +ECI does not protect the integrated Kubernetes cluster when using the +older **Kubeadm** single-node cluster provisioner. ### Does ECI protect containers launched prior to enabling ECI? -No. Containers created prior to switching on ECI are not protected. Therefore, it is +No. Containers created prior to switching on ECI are not protected. Therefore, it is recommended you remove all containers prior to switching on ECI. ### Does ECI affect the performance of containers? diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/features-benefits.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/features-benefits.md similarity index 99% rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/features-benefits.md rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/features-benefits.md index 832b5ee30841..c640aa18640b 100644 --- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/features-benefits.md +++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/features-benefits.md @@ -4,6 +4,7 @@ title: Key features and benefits keywords: set up, enhanced container isolation, rootless, security, features, Docker Desktop aliases: - /desktop/hardened-desktop/enhanced-container-isolation/features-benefits/ + - /security/for-admins/hardened-desktop/enhanced-container-isolation/features-benefits/ weight: 20 --- diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/how-eci-works.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/how-eci-works.md similarity index 98% rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/how-eci-works.md rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/how-eci-works.md index 2a1c6c8b86e7..8ee8ddb314df 100644 --- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/how-eci-works.md +++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/how-eci-works.md @@ -4,6 +4,7 @@ title: How does it work? 
keywords: set up, enhanced container isolation, rootless, security aliases: - /desktop/hardened-desktop/enhanced-container-isolation/how-eci-works/ + - /security/for-admins/hardened-desktop/enhanced-container-isolation/how-eci-works/ weight: 10 --- diff --git a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/limitations.md b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/limitations.md similarity index 93% rename from content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/limitations.md rename to content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/limitations.md index 758e5389701d..19b3d85b0af6 100644 --- a/content/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/limitations.md +++ b/content/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/limitations.md @@ -4,15 +4,17 @@ description: Limitations of Enhanced Container Isolation keywords: enhanced container isolation, security, sysbox, known issues, Docker Desktop toc_max: 2 weight: 50 +aliases: + - /security/for-admins/hardened-desktop/enhanced-container-isolation/limitations/ --- ### ECI support for WSL > [!NOTE] > -> Docker Desktop requires WSL 2 version 1.1.3.0 or later. To get the current +> Docker Desktop requires WSL 2 version 2.1.5 or later. To get the current > version of WSL on your host, type `wsl --version`. If the command fails or if -> it returns a version number prior to 1.1.3.0, update WSL to the latest version +> it returns a version number prior to 2.1.5, update WSL to the latest version > by typing `wsl --update` in a Windows command or PowerShell terminal. ECI on WSL is not as secure as on Hyper-V because: @@ -83,14 +85,9 @@ arrangements are needed, just enable ECI and run the KinD tool as usual. Extension containers are also not yet protected by ECI. Ensure your extension containers come from trusted entities to avoid issues. -### Docker Desktop Dev Environments are not yet protected - -Containers launched by the Docker Desktop Dev Environments feature are not yet -protected. - ### Docker Debug containers are not yet protected -[Docker Debug](https://docs.docker.com/reference/cli/docker/debug/) containers +[Docker Debug](/reference/cli/docker/debug.md) containers are not yet protected by ECI. ### Native Windows containers are not supported diff --git a/content/manuals/security/for-admins/hardened-desktop/image-access-management.md b/content/manuals/enterprise/security/hardened-desktop/image-access-management.md similarity index 84% rename from content/manuals/security/for-admins/hardened-desktop/image-access-management.md rename to content/manuals/enterprise/security/hardened-desktop/image-access-management.md index 6c5fd3a2745e..06dd8dea425a 100644 --- a/content/manuals/security/for-admins/hardened-desktop/image-access-management.md +++ b/content/manuals/enterprise/security/hardened-desktop/image-access-management.md @@ -8,6 +8,7 @@ aliases: - /desktop/hardened-desktop/image-access-management/ - /admin/organization/image-access/ - /security/for-admins/image-access-management/ + - /security/for-admins/hardened-desktop/image-access-management/ weight: 40 --- @@ -19,21 +20,25 @@ For example, a developer, who is part of an organization, building a new contain ## Prerequisites -You first need to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization.
Since Image Access Management requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users, even though it may still work without enforced sign-in. +You first need to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization. Since Image Access Management requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users, even though it might still work without enforced sign-in. + +> [!IMPORTANT] +> +> You must use [personal access tokens (PATs)](/manuals/security/access-tokens.md) with Image Access Management. Organization access tokens (OATs) are not compatible. ## Configure {{< tabs >}} -{{< tab name="Docker Hub" >}} +{{< tab name="Admin Console" >}} -{{% admin-image-access product="hub" %}} +{{% admin-image-access product="admin" %}} {{< /tab >}} -{{< tab name="Admin Console" >}} +{{< tab name="Docker Hub" >}} -{{< include "admin-early-access.md" >}} +{{% include "hub-org-management.md" %}} -{{% admin-image-access product="admin" %}} +{{% admin-image-access product="hub" %}} {{< /tab >}} {{< /tabs >}} diff --git a/content/manuals/enterprise/security/hardened-desktop/registry-access-management.md b/content/manuals/enterprise/security/hardened-desktop/registry-access-management.md new file mode 100644 index 000000000000..520cc71d58ea --- /dev/null +++ b/content/manuals/enterprise/security/hardened-desktop/registry-access-management.md @@ -0,0 +1,107 @@ +--- +description: Control access to approved registries with Registry Access Management, ensuring secure Docker Desktop usage +keywords: registry, access, management, permissions, Docker Business feature, security, admin +title: Registry Access Management +tags: [admin] +aliases: + - /desktop/hardened-desktop/registry-access-management/ + - /admin/organization/registry-access/ + - /docker-hub/registry-access-management/ + - /security/for-admins/registry-access-management/ + - /security/for-admins/hardened-desktop/registry-access-management/ +weight: 30 +--- + +{{< summary-bar feature_name="Registry access management" >}} + +With Registry Access Management (RAM), administrators can ensure that their +developers using Docker Desktop only access allowed registries. This is done +through the Registry Access Management dashboard in Docker Hub or the +Docker Admin Console. + +Registry Access Management supports both cloud and on-prem registries. This +feature operates at the DNS level and therefore is compatible with all +registries. You can add any hostname or domain name you’d like to include in the +list of allowed registries. However, if the registry redirects to other domains +such as `s3.amazonaws.com`, then you must add those domains to the list. + +Example registries administrators can allow include: + + - Docker Hub. This is enabled by default. + - Amazon ECR + - GitHub Container Registry + - Google Container Registry + - GitLab Container Registry + - Nexus + - Artifactory + +## Prerequisites + +You must [enforce sign-in](../enforce-sign-in/_index.md). For Registry Access +Management to take effect, Docker Desktop users must authenticate to your +organization.
Enforcing sign-in ensures that your Docker Desktop developers +always authenticate to your organization. The feature also takes effect for +users who sign in voluntarily, but enforcing sign-in guarantees that it +always takes effect for everyone. + +> [!IMPORTANT] +> +> You must use [personal access tokens (PATs)](/manuals/security/access-tokens.md) with Registry Access Management. Organization access tokens (OATs) are not compatible. + +## Configure Registry Access Management permissions + +{{< tabs >}} +{{< tab name="Admin Console" >}} + +{{% admin-registry-access product="admin" %}} + +{{< /tab >}} +{{< tab name="Docker Hub" >}} + +{{% include "hub-org-management.md" %}} + +{{% admin-registry-access product="hub" %}} + +{{< /tab >}} +{{< /tabs >}} + +## Verify the restrictions + +The new Registry Access Management policy takes effect after the developer +successfully authenticates to Docker Desktop using their organization +credentials. If a developer attempts to pull an image from a disallowed +registry via the Docker CLI, they receive an error message that the organization +has disallowed this registry. + +## Caveats + +There are certain limitations when using Registry Access Management: + +- You can add up to 100 registries or domains. +- Windows image pulls and image builds are not restricted by default. For +Registry Access Management to take effect on Windows Container mode, you must +allow the Windows Docker daemon to use Docker Desktop's internal proxy by +selecting the [Use proxy for Windows Docker daemon](/manuals/desktop/settings-and-maintenance/settings.md#proxies) +setting. +- Builds such as `docker buildx` using a Kubernetes driver are not restricted. +- Builds such as `docker buildx` using a custom docker-container driver are not +restricted. +- Blocking is DNS-based. You must use a registry's access control mechanisms to +distinguish between “push” and “pull”. +- WSL 2 requires at least a 5.4 series Linux kernel (this does not apply to +earlier Linux kernel series). +- Under the WSL 2 network, traffic from all Linux distributions is restricted. +This will be resolved in the updated 5.15 series Linux kernel. +- Images pulled by Docker Desktop when Docker Debug or Kubernetes is enabled +are not restricted by default, even if Docker Hub is blocked by RAM. +- If Docker Hub access is restricted by RAM, pulls on images originating from Docker Hub are restricted even if the image has been previously cached by a registry mirror. See [Using Registry Access Management (RAM) with a registry mirror](/manuals/docker-hub/image-library/mirror.md). + +Also, Registry Access Management operates on the level of hosts, not IP +addresses. Developers can bypass this restriction by manipulating domain name +resolution, for example by running Docker against a local proxy or by modifying +their operating system's `hosts` file. Blocking these forms of manipulation is +outside the remit of Docker Desktop.
+ +## More resources + +- [Video: Hardened Desktop Registry Access Management](https://www.youtube.com/watch?v=l9Z6WJdJC9A) diff --git a/content/manuals/enterprise/security/hardened-desktop/settings-management/_index.md b/content/manuals/enterprise/security/hardened-desktop/settings-management/_index.md new file mode 100644 index 000000000000..1dde5103df72 --- /dev/null +++ b/content/manuals/enterprise/security/hardened-desktop/settings-management/_index.md @@ -0,0 +1,86 @@ +--- +description: Understand how Settings Management works, who it is for, and what the + benefits are +keywords: Settings Management, rootless, docker desktop, hardened desktop +tags: [admin] +title: What is Settings Management? +linkTitle: Settings Management +aliases: + - /desktop/hardened-desktop/settings-management/ + - /security/for-admins/hardened-desktop/settings-management/ +weight: 10 +--- + +{{< summary-bar feature_name="Hardened Docker Desktop" >}} + +Settings Management lets administrators configure and enforce Docker Desktop +settings across end-user machines. It helps maintain consistent configurations +and enhances security within your organization. + +## Who is it for? + +Settings Management is designed for organizations that: + +- Require centralized control over Docker Desktop configurations. +- Aim to standardize Docker Desktop environments across teams. +- Operate in regulated environments and need to enforce compliance. + +This feature is available with a Docker Business subscription. + +## How it works + +Administrators can define settings using one of the following methods: + +- [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md): Create and assign settings policies through the +Docker Admin Console. +- [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md): Place a configuration file on the +user's machine to enforce settings. + +Enforced settings override user-defined configurations and can't be modified +by developers. + +## Configurable settings + +Settings Management supports a broad range of Docker Desktop features, +including proxies, network configurations, and container isolation. + +For a full list of settings you can enforce, see the [Settings reference](/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md). + +## Set up Settings Management + +1. [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) to +ensure all developers authenticate with your organization. +2. Choose a configuration method: + - Use the `--admin-settings` installer flag on [macOS](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line) or [Windows](/manuals/desktop/setup/install/windows-install.md#install-from-the-command-line) to automatically create the `admin-settings.json`. + - Manually create and configure the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md). + - Create a settings policy in the [Docker Admin Console](configure-admin-console.md). + +After configuration, developers receive the enforced settings when they: + +- Quit and relaunch Docker Desktop, then sign in. +- Launch and sign in to Docker Desktop for the first time. + +> [!NOTE] +> +> Docker Desktop does not automatically prompt users to restart or re-authenticate +after a settings change.
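For a first test, here is a minimal sketch of what an enforced settings file can look like. The `configurationFileVersion` and `enhancedContainerIsolation` keys are documented in the configuration guides linked above; the values shown are illustrative only:

```json
{
  "configurationFileVersion": 2,
  "enhancedContainerIsolation": {
    "locked": true,
    "value": true
  }
}
```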
+ +## Developer experience + +When settings are enforced: + +- Options appear grayed out in Docker Desktop and can't be modified via the +Dashboard, CLI, or configuration files. +- If Enhanced Container Isolation is enabled, developers can't use privileged +containers or similar methods to alter enforced settings within the Docker +Desktop Linux VM. + +## What's next? + +- [Configure Settings Management with the `admin-settings.json` file](configure-json-file.md) +- [Configure Settings Management with the Docker Admin Console](configure-admin-console.md) + +## Learn more + +- To see how each Docker Desktop setting maps across the Docker Dashboard, `admin-settings.json` file, and Admin Console, see the [Settings reference](settings-reference.md). +- Read the [Settings Management blog post](https://www.docker.com/blog/settings-management-for-docker-desktop-now-generally-available-in-the-admin-console/). diff --git a/content/manuals/enterprise/security/hardened-desktop/settings-management/compliance-reporting.md b/content/manuals/enterprise/security/hardened-desktop/settings-management/compliance-reporting.md new file mode 100644 index 000000000000..600cca8e1c7e --- /dev/null +++ b/content/manuals/enterprise/security/hardened-desktop/settings-management/compliance-reporting.md @@ -0,0 +1,146 @@ +--- +description: Understand how to use the Desktop settings reporting dashboard +keywords: Settings Management, docker desktop, hardened desktop, reporting, compliance +title: Desktop settings reporting +linkTitle: Desktop settings reporting +weight: 30 +params: + sidebar: + badge: + color: violet + text: EA +aliases: + - /security/for-admins/hardened-desktop/settings-management/compliance-reporting/ +--- + +{{< summary-bar feature_name="Compliance reporting" >}} + +Desktop settings reporting is a feature of Desktop Settings Management that +tracks and reports user compliance with the settings policies that are assigned +to them. This lets administrators track the application of settings and +monitor what actions they need to take to make users compliant. + +This guide provides steps for accessing Desktop settings reporting, viewing +compliance status, and resolving non-compliant users. + +## Access Desktop settings reporting + +> [!IMPORTANT] +> +> Desktop settings reporting is in Early Access and is being rolled out +> gradually. You might not see this setting in the Admin Console yet. + +1. Sign in to [Docker Home](https://app.docker.com) and select +your organization. +1. Select **Admin Console**, then **Desktop settings reporting**. + +This opens the Desktop settings reporting page. From here you can: + +- Use the **Search** field to search by username or email address +- Filter by policies +- Hide or show compliant users +- View a user’s compliance status and what policy is assigned to the user +- Download a CSV file of user compliance information + +## View compliance status + +> [!WARNING] +> +> Users on Docker Desktop versions older than 4.40 might appear non-compliant +> because older versions can't report compliance. To ensure accurate +> compliance status, users must update to Docker Desktop version 4.40 or later. + +1. Sign in to [Docker Home](https://app.docker.com) and select +your organization. +1. Select **Admin Console**, then **Desktop settings reporting**. +1. Optional. Clear the **Hide compliant users** checkbox to show both compliant +and non-compliant users. +1. Use the **Search** field to search by username or email address. +1.
Hover over a user’s compliance status indicator to quickly view their status. +1. Select a username to view more details about their compliance status, and for +steps to resolve non-compliant users. + +## Understand compliance status + +Docker evaluates compliance status based on: + +- Compliance status: Whether a user has fetched and applied the latest settings. This is the primary label shown on the reporting page. +- Domain status: Whether the user's email matches a verified domain. +- Settings status: Whether a settings policy is applied to the user. + +The combination of these statuses determines what actions you need to take. + +### Compliance status reference + +This reference explains how each status is determined in the reporting dashboard +based on user domain and settings data. The Admin Console displays the +highest-priority applicable status according to the following rules. + +**Compliance status** + +| Compliance status | What it means | +|-------------------|---------------| +| Uncontrolled domain | The user's email domain is not verified. | +| No policy assigned | The user does not have any policy assigned to them. | +| Non-compliant | The user fetched the correct policy, but hasn't applied it. | +| Outdated | The user fetched a previous version of the policy. | +| Compliant | The user fetched and applied the latest assigned policy. | + +**Domain status** + +This reflects how the user’s email domain is evaluated based on the organization’s domain setup. + +| Domain status | What it means | +|---------------|---------------| +| Verified | The user’s email domain is verified. | +| Guest user | The user's email domain is not verified. | +| Domainless | Your organization has no verified domains, and the user's domain is unknown. | + +**Settings status** + +This shows whether and how the user is assigned a settings policy. + +| Settings status | What it means | +|-----------------|---------------| +| Global policy | The user is assigned your organization's default policy. | +| User policy | The user is assigned a specific custom policy. | +| No policy assigned | The user is not assigned to any policy. | + +## Resolve compliance status + +To resolve compliance status, you must view a user's compliance status details +by selecting their username from the Desktop settings reporting page. +These details include the following information: + +- **Compliance status**: Indicates whether the user is compliant with the +settings applied to them. +- **Domain status**: Indicates whether the user’s email address is associated +with a verified domain. +- **Settings status**: Indicates whether the user has settings applied to them. +- **Resolution steps**: If a user is non-compliant, this provides information +on how to resolve the user’s compliance status. + +### Compliant + +When a user is compliant, a **Compliant** icon appears next to their name on the +Desktop settings reporting dashboard. Select a compliant user to open their +compliance status details. Compliant users have the following status details: + +- **Compliance status**: Compliant +- **Domain status**: Verified +- **Settings status**: Global policy or user policy +- **User is compliant** indicator + +No resolution steps are needed for compliant users. + +### Non-compliant + +When a user is non-compliant, a **Non-compliant** or **Unknown** icon appears +next to their name on the Desktop settings reporting dashboard. Non-compliant +users must have their compliance status resolved: + +1.
Select a username from the Desktop settings reporting dashboard. +1. On the compliance status details page, follow the resolution steps provided +to resolve the compliance status. +1. Refresh the page to ensure the resolution steps resolved the compliance +status. diff --git a/content/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md b/content/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md new file mode 100644 index 000000000000..4846e6034b96 --- /dev/null +++ b/content/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md @@ -0,0 +1,87 @@ +--- +description: How to configure Settings Management for Docker Desktop using the Docker Admin Console +keywords: admin, controls, rootless, enhanced container isolation +title: Configure Settings Management with the Admin Console +linkTitle: Use the Admin Console +weight: 20 +aliases: + - /security/for-admins/hardened-desktop/settings-management/configure-admin-console/ +--- + +{{< summary-bar feature_name="Admin Console" >}} + +This page explains how administrators can use the Docker Admin Console to create +and apply settings policies for Docker Desktop. These policies help standardize +and secure Docker Desktop environments across your organization. + +## Prerequisites + +- [Install Docker Desktop 4.37.1 or later](/manuals/desktop/release-notes.md). +- [Verify your domain](/manuals/enterprise/security/single-sign-on/configure.md#step-one-add-and-verify-your-domain). +- [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) to +ensure users authenticate to your organization. +- A Docker Business subscription is required. + +> [!IMPORTANT] +> +> You must add users to your verified domain for settings to take effect. + +## Create a settings policy + +1. Sign in to [Docker Home](https://app.docker.com/) and select +your organization. +1. Select **Admin Console**, then **Desktop Settings Management**. +1. Select **Create a settings policy**. +1. Provide a name and optional description. + + > [!TIP] + > + > You can upload an existing `admin-settings.json` file to pre-fill the form. + Admin Console policies override local `admin-settings.json` files. + +1. Choose who the policy applies to: + - All users + - Specific users + + > [!NOTE] + > + > User-specific policies override the global default. Test your policy with + a few users before rolling it out globally. + +1. Configure the state for each setting: + - **User-defined**: Users can change the setting. + - **Always enabled**: Setting is on and locked. + - **Enabled**: Setting is on but can be changed. + - **Always disabled**: Setting is off and locked. + - **Disabled**: Setting is off but can be changed. + + > [!TIP] + > + > For a complete list of available settings, their supported platforms, and which configuration methods they work with, see the [Settings reference](settings-reference.md). + +1. Select **Create**. + +To apply the policy: + +- New installs: Launch Docker Desktop and sign in. +- Existing installs: Fully quit and relaunch Docker Desktop. + +> [!IMPORTANT] +> +> Restarting from the Docker Desktop menu isn't enough. Users must fully quit +and relaunch Docker Desktop. + +Docker Desktop checks for policy updates at launch and every 60 minutes. To roll +back a policy, either delete it or set individual settings to **User-defined**. 
+ +## Manage policies + +From the **Actions** menu on the **Settings Management** page, you can: + +- Edit or delete an existing settings policy +- Export a settings policy as an `admin-settings.json` file +- Promote a user-specific policy to be the new global default + +## Learn more + +To see how each Docker Desktop setting maps across the Docker Dashboard, `admin-settings.json` file, and Admin Console, see the [Settings reference](settings-reference.md). \ No newline at end of file diff --git a/content/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md b/content/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md similarity index 54% rename from content/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md rename to content/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md index 009b21e0b9f7..2bb9ea66fe30 100644 --- a/content/manuals/security/for-admins/hardened-desktop/settings-management/configure-json-file.md +++ b/content/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md @@ -4,54 +4,83 @@ keywords: admin, controls, rootless, enhanced container isolation title: Configure Settings Management with a JSON file linkTitle: Use a JSON file weight: 10 -aliases: +aliases: - /desktop/hardened-desktop/settings-management/configure/ - /security/for-admins/hardened-desktop/settings-management/configure/ + - /security/for-admins/hardened-desktop/settings-management/configure-json-file/ --- {{< summary-bar feature_name="Hardened Docker Desktop" >}} -This page contains information on how to configure Settings Management with an `admin-settings.json` file. You can specify and lock configuration parameters to create a standardized Docker Desktop environment across your company or organization. - -Settings Management is designed specifically for organizations who don’t give developers root access to their machines. +This page explains how to use an `admin-settings.json` file to configure and +enforce Docker Desktop settings. Use this method to standardize Docker +Desktop environments in your organization. ## Prerequisites -You first need to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization. Since Settings Management requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users, even though it may still work without enforced sign-in. +- [Enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) to +ensure all users authenticate with your organization. +- A Docker Business subscription is required. -## Step one: Create the `admin-settings.json` file and save it in the correct location +Docker Desktop only applies settings from the `admin-settings.json` file if both +authentication and Docker Business license checks succeed. -You can either use the `--admin-settings` installer flag on [macOS](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line) or [Windows](/manuals/desktop/setup/install/windows-install.md#install-from-the-command-line) to automatically create the `admin-settings.json` and save it in the correct location, or set it up manually. +> [!IMPORTANT] +> +> If a user isn't signed in or isn't part of a Docker Business organization, +the settings file is ignored. 
-To set it up manually: -1. Create a new, empty JSON file and name it `admin-settings.json`. -2. Save the `admin-settings.json` file on your developers' machines in the following locations: - - Mac: `/Library/Application\ Support/com.docker.docker/admin-settings.json` - - Windows: `C:\ProgramData\DockerDesktop\admin-settings.json` - - Linux: `/usr/share/docker-desktop/admin-settings.json` + +## Limitations - By placing this file in a protected directory, developers are unable to modify it. +- The `admin-settings.json` file doesn't work in air-gapped or offline +environments. +- The file is not compatible with environments that restrict authentication +with Docker Hub. - > [!IMPORTANT] - > - > It is assumed that you have the ability to push the `admin-settings.json` settings file to the locations specified through a device management software such as [Jamf](https://www.jamf.com/lp/en-gb/apple-mobile-device-management-mdm-jamf-shared/?attr=google_ads-brand-search-shared&gclid=CjwKCAjw1ICZBhAzEiwAFfvFhEXjayUAi8FHHv1JJitFPb47C_q_RCySTmF86twF1qJc_6GST-YDmhoCuJsQAvD_BwE). +## Step one: Create the settings file -## Step two: Configure the settings you want to lock in +You can: -> [!NOTE] +- Use the `--admin-settings` installer flag to auto-generate the file. See: + - [macOS](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line) install guide + - [Windows](/manuals/desktop/setup/install/windows-install.md#install-from-the-command-line) install guide +- Or create it manually and place it in the following locations: + - Mac: `/Library/Application\ Support/com.docker.docker/admin-settings.json` + - Windows: `C:\ProgramData\DockerDesktop\admin-settings.json` + - Linux: `/usr/share/docker-desktop/admin-settings.json` + +> [!IMPORTANT] +> +> Place the file in a protected directory to prevent modification. Use MDM tools +like [Jamf](https://www.jamf.com/lp/en-gb/apple-mobile-device-management-mdm-jamf-shared/?attr=google_ads-brand-search-shared&gclid=CjwKCAjw1ICZBhAzEiwAFfvFhEXjayUAi8FHHv1JJitFPb47C_q_RCySTmF86twF1qJc_6GST-YDmhoCuJsQAvD_BwE) to distribute it at scale. + +## Step two: Define settings + +> [!TIP] > -> Some of the configuration parameters only apply to certain platforms or to specific Docker Desktop versions. This is highlighted in the following table. +> For a complete list of available settings, their supported platforms, and which configuration methods they work with, see the [Settings reference](settings-reference.md). + +The `admin-settings.json` file uses structured keys to define what can +be configured and whether the values are enforced. -The `admin-settings.json` file requires a nested list of configuration parameters, each of which must contain the `locked` parameter. You can add or remove configuration parameters as per your requirements. +Each setting supports the `locked` field. When `locked` is set to `true`, users +can't change that value in Docker Desktop, the CLI, or config files. When +`locked` is set to `false`, the value acts like a default suggestion and users +can still update it. -If `locked: true`, users aren't able to edit this setting from Docker Desktop or the CLI. +Settings where `locked` is set to `false` are ignored on existing installs if +a user has already customized that value in `settings-store.json`, +`settings.json`, or `daemon.json`.
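As a hedged illustration of these `locked` semantics, the following fragment enforces one setting and merely suggests another. Both keys appear in the configuration tables later on this page; the chosen values are arbitrary:

```json
{
  "configurationFileVersion": 2,
  "analyticsEnabled": {
    "locked": true,
    "value": false
  },
  "desktopTerminalEnabled": {
    "locked": false,
    "value": true
  }
}
```

Here `analyticsEnabled` is grayed out for every user, while `desktopTerminalEnabled` only pre-populates a default on fresh installs.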
-If `locked: false`, it's similar to setting a factory default in that: - - For new installs, `locked: false` pre-populates the relevant settings in the Docker Desktop Dashboard, but users are able to modify it. +> [!NOTE] +> +> Some settings are platform-specific or require a minimum Docker Desktop +version. See the [Settings reference](/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md) for details. - - If Docker Desktop is already installed and being used, `locked: false` is ignored. This is because existing users of Docker Desktop may have already updated a setting, which in turn will have been written to the relevant config file, for example the `settings-store.json` (or `settings.json` for Docker Desktop versions 4.34 and earlier) or `daemon.json`. In these instances, the user's preferences are respected and the values aren't altered. These can be controlled by setting `locked: true`. +### Example settings file -The following `admin-settings.json` code and table provides an example of the required syntax and descriptions for parameters and values: +The following file is an example `admin-settings.json` file. For a full list +of configurable settings for the `admin-settings.json` file, see [`admin-settings.json` configurations](#admin-settingsjson-configurations). ```json {collapse=true} { @@ -137,10 +166,6 @@ The following `admin-settings.json` code and table provides an example of the re "sbomIndexing": true, "useBackgroundIndexing": true }, - "allowExperimentalFeatures": { - "locked": false, - "value": false - }, "allowBetaFeatures": { "locked": false, "value": false @@ -182,7 +207,21 @@ The following `admin-settings.json` code and table provides an example of the re } ``` -### General +## Step three: Restart and apply settings + +Settings apply after Docker Desktop is restarted and the user is signed in. + +- New installs: Launch Docker Desktop and sign in. +- Existing installs: Quit Docker Desktop fully and relaunch it. + +> [!IMPORTANT] +> +> Restarting Docker Desktop from the menu isn't enough. It must be fully +quit and reopened. + +## `admin-settings.json` configurations + +### General |Parameter|OS|Description|Version| |:-------------------------------|---|:-------------------------------|---| @@ -195,7 +234,7 @@ The following `admin-settings.json` code and table provides an example of the re | `desktopTerminalEnabled` | | If `value` is set to `false`, developers cannot use the Docker terminal to interact with the host machine and execute commands directly from Docker Desktop. | | |`exposeDockerAPIOnTCP2375`| Windows only| Exposes the Docker API on a specified port. If `value` is set to true, the Docker API is exposed on port 2375. Note: This is unauthenticated and should only be enabled if protected by suitable firewall rules.| | -### File sharing and emulation +### File sharing and emulation |Parameter|OS|Description|Version| |:-------------------------------|---|:-------------------------------|---| @@ -241,24 +280,51 @@ The following `admin-settings.json` code and table provides an example of the re |        `dockerDaemonOptions` | | Overrides the options in the Linux daemon config file. See the [Docker Engine reference](/reference/cli/dockerd/#daemon-configuration-file).| | > [!NOTE] -> +> > This setting is not available to configure via the Docker Admin Console. 
### Kubernetes |Parameter|OS|Description|Version| |:-------------------------------|---|:-------------------------------|---| -|`kubernetes`| | If `enabled` is set to true, a Kubernetes single-node cluster is started when Docker Desktop starts. If `showSystemContainers` is set to true, Kubernetes containers are displayed in the Docker Desktop Dashboard and when you run `docker ps`. `imagesRepository` lets you specify which repository Docker Desktop pulls the Kubernetes images from. For example, `"imagesRepository": "registry-1.docker.io/docker"`. | | +|`kubernetes`| | If `enabled` is set to true, a Kubernetes single-node cluster is started when Docker Desktop starts. If `showSystemContainers` is set to true, Kubernetes containers are displayed in the Docker Desktop Dashboard and when you run `docker ps`. The [imagesRepository](../../../../desktop/features/kubernetes.md#configuring-a-custom-image-registry-for-kubernetes-control-plane-images) setting lets you specify which repository Docker Desktop pulls control-plane Kubernetes images from. | | + +> [!NOTE] +> +> When using the `imagesRepository` setting and Enhanced Container Isolation (ECI), add the following images to the [ECI Docker socket mount image list](#enhanced-container-isolation): +> +> * [imagesRepository]/desktop-cloud-provider-kind:* +> * [imagesRepository]/desktop-containerd-registry-mirror:* +> +> These containers mount the Docker socket, so you must add the images to the ECI images list. If not, ECI will block the mount and Kubernetes won't start. -### Features in development +### Networking |Parameter|OS|Description|Version| |:-------------------------------|---|:-------------------------------|---| -| `allowExperimentalFeatures`| | If `value` is set to `false`, experimental features are disabled.| | -| `allowBetaFeatures`| | If `value` is set to `false`, beta features are disabled.| | -| `enableDockerAI` | | If `value` is set to `false`, Docker AI (Ask Gordon) features are disabled. | | +| `defaultNetworkingMode` | Windows and Mac only | Defines the default IP protocol for new Docker networks: `dual-stack` (IPv4 + IPv6, default), `ipv4only`, or `ipv6only`. | Docker Desktop version 4.43 and later. | +| `dnsInhibition` | Windows and Mac only | Controls DNS record filtering returned to containers. Options: `auto` (recommended), `ipv4`, `ipv6`, `none`| Docker Desktop version 4.43 and later. | + +For more information, see [Networking](/manuals/desktop/features/networking.md#networking-mode-and-dns-behaviour-for-mac-and-windows). + +### Beta features + +> [!IMPORTANT] +> +> For Docker Desktop versions 4.41 and earlier, some of these settings lived under the **Experimental features** tab on the **Features in development** page. -### Enhanced Container Isolation +| Parameter | OS | Description | Version | +|:-----------------------------------------------------|----|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------| +| `allowBetaFeatures` | | If `value` is set to `true`, beta features are enabled. | | +| `enableDockerAI` | | If `allowBetaFeatures` is true, setting `enableDockerAI` to `true` enables [Docker AI (Ask Gordon)](/manuals/ai/gordon/_index.md) by default. You can independently control this setting from the `allowBetaFeatures` setting. 
| | +| `enableInference` | | If `allowBetaFeatures` is true, setting `enableInference` to `true` enables [Docker Model Runner](/manuals/ai/model-runner/_index.md) by default. You can independently control this setting from the `allowBetaFeatures` setting. | | +|         `enableInferenceTCP` | | Enable host-side TCP support. This setting requires the Docker Model Runner setting to be enabled first. | | +|         `enableInferenceTCPPort` | | Specifies the exposed TCP port. This setting requires the Docker Model Runner setting to be enabled first. | | +|         `enableInferenceCORS` | | Specifies the allowed CORS origins. Empty string to deny all, `*` to accept all, or a list of comma-separated values. This setting requires the Docker Model Runner setting to be enabled first. | | +| `enableDockerMCPToolkit` | | If `allowBetaFeatures` is true, setting `enableDockerMCPToolkit` to `true` enables the [MCP toolkit feature](/manuals/ai/mcp-catalog-and-toolkit/toolkit.md) by default. You can independently control this setting from the `allowBetaFeatures` setting. | | +| `allowExperimentalFeatures` | | If `value` is set to `true`, experimental features are enabled. | Docker Desktop version 4.41 and earlier | + +### Enhanced Container Isolation |Parameter|OS|Description|Version| |:-------------------------------|---|:-------------------------------|---| @@ -266,22 +332,3 @@ The following `admin-settings.json` code and table provides an example of the re |        `dockerSocketMount` | | By default, enhanced container isolation blocks bind-mounting the Docker Engine socket into containers (for example, `docker run -v /var/run/docker.sock:/var/run/docker.sock ...`). This lets you relax this in a controlled way. See [ECI Configuration](../enhanced-container-isolation/config.md) for more information. | | |               `imageList` | | Indicates which container images are allowed to bind-mount the Docker socket. | | |               `commandList` | | Restricts the commands that containers can issue via the bind-mounted Docker Engine socket. | | - -## Step three: Re-launch Docker Desktop - -> [!NOTE] -> -> Test the changes made through the `admin-settings.json` file locally to see if the settings work as expected. - -For settings to take effect: -- On a new install, developers need to launch Docker Desktop and authenticate to their organization. -- On an existing install, developers need to quit Docker Desktop through the Docker menu, and then re-launch Docker Desktop. If they are already signed in, they don't need to sign in again for the changes to take effect. - > [!IMPORTANT] - > - > Selecting **Restart** from the Docker menu isn't enough as it only restarts some components of Docker Desktop. - -So as not to disrupt your developers' workflow, Docker doesn't automatically mandate that developers re-launch and re-authenticate once a change has been made. - -In Docker Desktop, developers see the relevant settings grayed out and the message **Locked by your administrator**.
- -![Proxy settings grayed out with Settings Management](/assets/images/grayed-setting.png) diff --git a/content/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md b/content/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md new file mode 100644 index 000000000000..ac89fd75a3fe --- /dev/null +++ b/content/manuals/enterprise/security/hardened-desktop/settings-management/settings-reference.md @@ -0,0 +1,1198 @@ +--- +description: Reference for all settings and features that are configured with Settings Management +keywords: admin, controls, settings management, reference +title: Settings reference +linkTitle: Settings reference +aliases: + - /security/for-admins/hardened-desktop/settings-management/settings-reference/ +--- + +This reference lists all Docker Desktop settings, including where they are configured, which operating systems they apply to, and whether they're available in the Docker Desktop GUI, the Docker Admin Console, or the `admin-settings.json` file. Settings are grouped to match the structure of the Docker Desktop interface. + +Each setting includes: + +- The display name used in Docker Desktop +- A table of values, default values, and required format +- A description and use cases +- OS compatibility +- Configuration methods: via [Docker Desktop](/manuals/desktop/settings-and-maintenance/settings.md), the Admin Console, or the `admin-settings.json` file + +Use this reference to compare how settings behave across different configuration +methods and platforms. + +## General + +### Start Docker Desktop when you sign in to your computer + +| Default value | Accepted values | Format | +|---------------|-----------------|--------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Start Docker Desktop automatically when booting the machine. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Ensure Docker Desktop is always running after boot. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Open Docker Dashboard when Docker Desktop starts + +| Default value | Accepted values | Format | +|---------------|----------------------------|--------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Open the Docker Dashboard automatically when Docker Desktop starts. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Quickly access containers, images, and volumes in the Docker Dashboard after starting Docker Desktop. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Choose theme for Docker Desktop + +| Default value | Accepted values | Format | +|---------------|----------------------------|--------| +| `system` | `light`, `dark`, `system` | Enum | + +- **Description:** Choose the Docker Desktop GUI theme. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Personalize Docker Desktop appearance. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Configure shell completions + +| Default value | Accepted values | Format | +|---------------|-------------------------|--------| +| `integrated` | `integrated`, `system` | String | + +- **Description:** If installed, automatically edits your shell configuration.
+- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Customize developer experience with shell completions. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Choose container terminal + +| Default value | Accepted values | Format | +|---------------|-------------------------|--------| +| `integrated` | `integrated`, `system` | String | + +- **Description:** Select default terminal for launching Docker CLI from Docker +Desktop. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Customize developer experience with preferred terminal. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Enable Docker terminal + +| Default value | Accepted values | Format | +|---------------|-----------------|--------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Enable access to the Docker Desktop integrated terminal. If +the value is set to `false`, users can't use the Docker terminal to interact +with the host machine and execute commands directly from Docker Desktop. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Allow or restrict developer access to the built-in terminal. +- **Configure this setting with:** + - **General** setting in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + - Settings Management: `desktopTerminalEnabled` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) + +### Enable Docker Debug by default + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Enable debug logging by default for Docker CLI commands. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Assist with debugging support issues. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Include VM in Time Machine backup + +| Default value | Accepted values | Format | +|---------------|-----------------|--------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Back up the Docker Desktop virtual machine. +- **OS:** {{< badge color=blue text="Mac only" >}} +- **Use case:** Manage persistence of application data. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Use containerd for pulling and storing images + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Use containerd native snapshotter instead of legacy +snapshotters. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Improve image handling performance and compatibility. 
+- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Choose Virtual Machine Manager + +#### Docker VMM + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +#### Apple Virtualization framework + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +- **Description:** Use Apple Virtualization Framework to run Docker containers. +- **OS:** {{< badge color=blue text="Mac only" >}} +- **Use case:** Improve VM performance on Apple Silicon. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +#### Rosetta + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +- **Description:** Use Rosetta to emulate `amd64` on Apple Silicon. If value +is set to `true`, Docker Desktop turns on Rosetta to accelerate +x86_64/amd64 binary emulation on Apple Silicon. +- **OS:** {{< badge color=blue text="Mac only" >}} 13+ +- **Use case:** Run Intel-based containers on Apple Silicon hosts. + +> [!NOTE] +> +> In hardened environments, disable and lock this setting so only ARM-native +images are permitted. + +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + - Settings Management:`useVirtualizationFrameworkRosetta` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) + - Settings Management: **Use Rosetta for x86_64/amd64 emulation on Apple Silicon** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) + +> [!NOTE] +> +> Rosetta requires enabling Apple Virtualization framework. + +#### QEMU + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +### Choose file sharing implementation + +#### VirtioFS + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +- **Description:** Use VirtioFS for fast, native file sharing between host and +containers. If value is set to `true`, VirtioFS is set as the file sharing +mechanism. If both VirtioFS and gRPC are set to `true`, VirtioFS takes +precedence. +- **OS:** {{< badge color=blue text="Mac only" >}} 12.5+ +- **Use case:** Improve volume mount performance and compatibility. + +> [!NOTE] +> +> In hardened environments, enable and lock this setting for macOS 12.5 and +later. 
+ + - **Configure this setting with:** + - **General settings** in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + - Settings Management: `useVirtualizationFrameworkVirtioFS` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) + - Settings Management: **Use VirtioFS for file sharing** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) + +#### gRPC FUSE + +| Default value | Accepted values | Format | +|---------------|-----------------|----------| +| `true` | `true`, `false` | Boolean | + +- **Description:** Enable gRPC FUSE for macOS file sharing. If value is set to +`true`, gRPC FUSE is set as the file sharing mechanism. +- **OS:** {{< badge color=blue text="Mac only" >}} +- **Use case:** Improve performance and compatibility of file mounts. + +> [!NOTE] +> +> In hardened environments, disable and lock this setting. + +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + - Settings Management: `useGrpcfuse` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) + - Settings Management: **Use gRPC FUSE for file sharing** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) + +#### osxfs + +| Default value | Accepted values | Format | +| ------------- | --------------- | ------- | +| `false` | `true`, `false` | Boolean | + +- **Description:** Enable the legacy osxfs file sharing driver for macOS. When +set to true, Docker Desktop uses osxfs instead of VirtioFS or gRPC FUSE to mount +host directories into containers. +- **OS:** {{< badge color=blue text="Mac only" >}} +- **Use case:** Use the original file sharing implementation when compatibility +with older tooling or specific workflows is required. +- **Configure this setting with:** + - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md) + +### Send usage statistics + +| Default value | Accepted values | Format | +|---------------|-----------------|--------| +| `true` | `true`, `false` | Boolean | + +- **Description:** Controls whether Docker Desktop collects and sends local +usage statistics and crash reports to Docker. This setting affects telemetry +gathered from the Docker Desktop application itself. It does not affect +server-side telemetry collected via Docker Hub or other backend services, such +as login timestamps, pulls, or builds. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Enable analytics to help Docker improve the product based on +usage data. + +> [!NOTE] +> +> In hardened environments, disable and lock this setting. This allows you +to control all your data flows and collect support logs via secure channels +if needed.
+
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `analyticsEnabled` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Send usage statistics** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+> [!NOTE]
+>
+> Organizations using the Insights Dashboard might need this setting enabled to
+ensure that developer activity is fully visible. If users opt out and the
+setting is not locked, their activity might be excluded from analytics
+views.
+
+### Use Enhanced Container Isolation
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable Enhanced Container Isolation for secure container
+execution.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Prevent containers from modifying configuration or sensitive
+host areas.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting.
+
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enhancedContainerIsolation` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Enable enhanced container isolation** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Show CLI hints
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Display helpful CLI tips in the terminal when using Docker commands.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Help users discover and learn Docker CLI features through inline suggestions.
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Enable Scout image analysis
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker Scout to generate and display SBOM data for container images.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Turn on Docker Scout analysis features to view vulnerabilities, packages, and metadata associated with images.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting to ensure SBOMs are
+always built to satisfy compliance scans.
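+
+Similarly, a sketch that enforces SBOM indexing through the `sbomIndexing` key listed below (values are illustrative):
+
+```json
+{
+  "configurationFileVersion": 2,
+  "sbomIndexing": {
+    "locked": true,
+    "value": true
+  }
+}
+```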
+
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `sbomIndexing` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **SBOM indexing** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Enable background Scout SBOM indexing
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Automatically index SBOM data for images in the background without requiring user interaction.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Keep image metadata up to date by allowing Docker to perform SBOM indexing during idle time or after image pull operations.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting.
+
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Automatically check configuration
+
+| Default value             | Accepted values | Format  |
+|---------------------------|-----------------|---------|
+| `CurrentSettingsVersions` | Integer         | Integer |
+
+- **Description:** Regularly checks your configuration to ensure no unexpected changes have been made by another application.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Track versions for compatibility.
+- **Configure this setting with:**
+  - **General** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `configurationFileVersion` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+## Resources
+
+### CPU limit
+
+| Default value                                 | Accepted values | Format  |
+|-----------------------------------------------|-----------------|---------|
+| Number of logical CPU cores available on host | Integer         | Integer |
+
+- **Description:** Number of CPUs assigned to the Docker Desktop virtual machine.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Resource allocation control.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Memory limit
+
+| Default value             | Accepted values | Format  |
+|---------------------------|-----------------|---------|
+| Based on system resources | Integer         | Integer |
+
+- **Description:** Amount of RAM (in MiB) assigned to the Docker virtual machine.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Control how much memory Docker can use on the host.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Swap
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `1024`        | Integer         | Integer |
+
+- **Description:** Amount of swap space (in MiB) assigned to the Docker virtual machine.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Extend memory availability via swap.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Disk usage limit
+
+| Default value                 | Accepted values | Format  |
+|-------------------------------|-----------------|---------|
+| Default disk size of machine. | Integer         | Integer |
+
+- **Description:** Maximum disk size (in MiB) allocated for Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Constrain Docker's virtual disk size for storage management.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Disk image location
+
+| Default value | Accepted values | Format |
+|---------------|-----------------|--------|
+| macOS: `~/Library/Containers/com.docker.docker/Data/vms/0`<br>Windows: `%USERPROFILE%\AppData\Local\Docker\wsl\data` | File path | String |
+
+- **Description:** Path where Docker Desktop stores virtual machine data.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Redirect Docker data to a custom location.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Enable Resource Saver
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker Desktop to pause when idle.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Save system resources during periods of inactivity.
+- **Configure this setting with:**
+  - **Advanced** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### File sharing directories
+
+| Default value | Accepted values               | Format           |
+|---------------|-------------------------------|------------------|
+| Varies by OS  | List of file paths as strings | Array of strings |
+
+- **Description:** List of allowed directories shared between the host and
+containers. When a path is added, its subdirectories are allowed.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Restrict or define what file paths are available to containers.
+
+> [!NOTE]
+>
+> In hardened environments, lock this setting to an explicit allowlist and
+disable end-user edits.
+
+- **Configure this setting with:**
+  - **File sharing** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `filesharingAllowedDirectories` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Allowed file sharing directories** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Proxy exclude
+
+| Default value | Accepted values   | Format |
+|---------------|-------------------|--------|
+| `""`          | List of addresses | String |
+
+- **Description:** Configure addresses that are excluded from proxy settings
+for containers.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Fine-tune proxy exceptions for container networking.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting.
+
+- **Configure this setting with:**
+  - **Proxies** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `proxy` setting with `manual` and `exclude` modes in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Docker subnet
+
+| Default value     | Accepted values | Format |
+|-------------------|-----------------|--------|
+| `192.168.65.0/24` | IP address      | String |
+
+- **Description:** Overrides the network range used for VPNKit DHCP/DNS for
+`*.docker.internal`.
+- **OS:** {{< badge color=blue text="Mac only" >}}
+- **Use case:** Customize the subnet used for Docker container networking.
+- **Configure this setting with:**
+  - Settings Management: `vpnkitCIDR` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **VPN Kit CIDR** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Use kernel networking for UDP
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Use the host’s kernel network stack for UDP traffic instead of Docker’s virtual network driver. This enables faster and more direct UDP communication, but might bypass some container isolation features.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Improve performance or compatibility for workloads that rely heavily on UDP traffic, such as real-time media, DNS, or game servers.
+- **Configure this setting with:**
+  - **Network** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Enable host networking
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable experimental host networking support.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Allow containers to use the host network stack.
+- **Configure this setting with:**
+  - **Network** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Networking mode
+
+| Default value | Accepted values                      | Format |
+|---------------|--------------------------------------|--------|
+| `dual-stack`  | `dual-stack`, `ipv4only`, `ipv6only` | String |
+
+- **Description:** Set the networking mode.
+- **OS:** {{< badge color=blue text="Windows and Mac" >}}
+- **Use case:** Choose the default IP protocol used when Docker creates new networks.
+- **Configure this setting with:**
+  - **Network** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `defaultNetworkingMode` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+For more information, see [Networking](/manuals/desktop/features/networking.md#networking-mode-and-dns-behaviour-for-mac-and-windows).
+
+#### Inhibit DNS resolution for IPv4/IPv6
+
+| Default value | Accepted values        | Format |
+|---------------|------------------------|--------|
+| `auto`        | `ipv4`, `ipv6`, `none` | String |
+
+- **Description:** Filters unsupported DNS record types. Requires Docker Desktop
+version 4.43 and later.
+- **OS:** {{< badge color=blue text="Windows and Mac" >}}
+- **Use case:** Control how Docker filters DNS records returned to containers, improving reliability in environments where only IPv4 or IPv6 is supported.
+- **Configure this setting with:**
+  - **Network** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `dnsInhibition` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+For more information, see [Networking](/manuals/desktop/features/networking.md#networking-mode-and-dns-behaviour-for-mac-and-windows).
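+
+As a sketch, enforcing IPv4-only networking and IPv6 DNS filtering through `admin-settings.json` might look like this, using the keys from the two settings above (values are illustrative):
+
+```json
+{
+  "configurationFileVersion": 2,
+  "defaultNetworkingMode": {
+    "locked": true,
+    "value": "ipv4only"
+  },
+  "dnsInhibition": {
+    "locked": true,
+    "value": "ipv6"
+  }
+}
+```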
+
+### Enable WSL engine
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** If the value is set to `true`, Docker Desktop uses the WSL 2
+based engine. This overrides anything that might have been set at installation
+using the `--backend=` flag.
+- **OS:** {{< badge color=blue text="Windows only" >}} + WSL
+- **Use case:** Enable Linux containers via WSL 2 backend.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting.
+
+- **Configure this setting with:**
+  - **WSL Integration** Resources settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `wslEngineEnabled` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Windows Subsystem for Linux (WSL) Engine** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+## Docker Engine
+
+The Docker Engine settings let you configure low-level daemon settings through a raw JSON object. These settings are passed directly to the `dockerd` process that powers container management in Docker Desktop.
+
+| Key                   | Example                     | Description                                        | Accepted values / Format       | Default |
+| --------------------- | --------------------------- | -------------------------------------------------- | ------------------------------ | ------- |
+| `debug`               | `true`                      | Enable verbose logging in the Docker daemon        | Boolean                        | `false` |
+| `experimental`        | `true`                      | Enable experimental Docker CLI and daemon features | Boolean                        | `false` |
+| `insecure-registries` | `["myregistry.local:5000"]` | Allow pulling from HTTP registries without TLS     | Array of strings (`host:port`) | `[]`    |
+| `registry-mirrors`    | `["https://mirror.gcr.io"]` | Define alternative registry endpoints              | Array of URLs                  | `[]`    |
+
+- **Description:** Customize the behavior of the Docker daemon using a structured JSON config passed directly to `dockerd`.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Fine-tune registry access, enable debug mode, or opt into experimental features.
+- **Configure this setting with:**
+  - **Docker Engine** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+> [!NOTE]
+>
+> Values for this setting are passed as-is to the Docker daemon. Invalid or unsupported fields might prevent Docker Desktop from starting.
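+
+Putting the table's examples together, the raw JSON object entered under the **Docker Engine** settings might look like the following sketch (the registry hostnames are placeholders):
+
+```json
+{
+  "debug": false,
+  "experimental": false,
+  "insecure-registries": ["myregistry.local:5000"],
+  "registry-mirrors": ["https://mirror.gcr.io"]
+}
+```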
+
+## Builders
+
+The Builders settings let you manage Buildx builder instances for advanced image-building scenarios, including multi-platform builds and custom backends.
+
+| Key         | Example                          | Description                                                                     | Accepted values / Format  | Default   |
+| ----------- | -------------------------------- | ------------------------------------------------------------------------------- | ------------------------- | --------- |
+| `name`      | `"my-builder"`                   | Name of the builder instance                                                     | String                    | —         |
+| `driver`    | `"docker-container"`             | Backend used by the builder (such as `docker`, `docker-container`, or `remote`) | String                    | `docker`  |
+| `platforms` | `["linux/amd64", "linux/arm64"]` | Target platforms supported by the builder                                        | Array of platform strings | Host arch |
+
+- **Description:** Configure custom Buildx builders for Docker Desktop, including driver type and supported platforms.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Set up advanced build configurations like cross-platform images or remote builders.
+- **Configure this setting with:**
+  - **Builders** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+> [!NOTE]
+>
+> Builder definitions are structured as an array of objects, each describing a builder instance. Conflicting or unsupported configurations might cause build errors.
+
+## Kubernetes
+
+### Enable Kubernetes
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable the integrated Kubernetes cluster in Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enable or disable Kubernetes support for developers.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting.
+
+> [!IMPORTANT]
+>
+> When Kubernetes is enabled through Settings Management policies, only the
+`kubeadm` cluster provisioning method is supported. The `kind` provisioning
+method is not yet supported by Settings Management.
+
+- **Configure this setting with:**
+  - **Kubernetes** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `kubernetes` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Allow Kubernetes** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Choose cluster provisioning method
+
+| Default value | Accepted values   | Format |
+|---------------|-------------------|--------|
+| `kubeadm`     | `kubeadm`, `kind` | String |
+
+- **Description:** Set the cluster provisioning method: `kubeadm` creates a
+single-node cluster, while `kind` supports multi-node clusters.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Control the topology of the integrated Kubernetes cluster.
+- **Configure this setting with:**
+  - **Kubernetes** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Kubernetes node count (kind provisioning)
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `1`           | Integer         | Integer |
+
+- **Description:** Number of nodes to create in a multi-node Kubernetes cluster.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Scale the number of Kubernetes nodes for development or testing.
+- **Configure this setting with:**
+  - **Kubernetes** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Kubernetes node version (kind provisioning)
+
+| Default value | Accepted values                           | Format |
+|---------------|-------------------------------------------|--------|
+| `1.31.1`      | Semantic version (for example, `1.29.1`)  | String |
+
+- **Description:** Version of Kubernetes used for cluster node creation.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Pin a specific Kubernetes version for consistency or
+compatibility.
+- **Configure this setting with:**
+  - **Kubernetes** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Show system containers
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Show Kubernetes system containers in the Docker Dashboard container list.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Allow developers to view kube-system containers for debugging.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting.
+
+- **Configure this setting with:**
+  - **Kubernetes** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Custom Kubernetes image repository
+
+| Default value | Accepted values | Format |
+|---------------|-----------------|--------|
+| `""`          | Registry URL    | String |
+
+- **Description:** Configure a custom image repository for Kubernetes control
+plane images. This lets Docker Desktop pull Kubernetes system
+images from a private registry or mirror instead of Docker Hub. This setting
+overrides the `[registry[:port]/][namespace]` portion of image names.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Use private registries in air-gapped environments or
+when Docker Hub access is restricted.
+
+> [!NOTE]
+>
+> The images must be cloned or mirrored from Docker Hub with matching tags. The
+specific images required depend on the cluster provisioning method (`kubeadm`
+or `kind`). See the Kubernetes documentation for the complete list
+of required images and detailed setup instructions.
+
+- **Configure this setting with:**
+  - Settings Management: `KubernetesImagesRepository` setting in the
+    [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Kubernetes Images Repository** setting in the
+    [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+> [!IMPORTANT]
+>
+> When using `KubernetesImagesRepository` with Enhanced Container Isolation (ECI)
+enabled, you must add the following images to the ECI Docker socket mount image
+list: `[imagesRepository]/desktop-cloud-provider-kind:*` and
+`[imagesRepository]/desktop-containerd-registry-mirror:*`.
+
+## Software updates
+
+### Automatically check for updates
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Disable automatic update polling for Docker Desktop. If the
+value is set to `true`, checking for updates and notifications about Docker
+Desktop updates are disabled.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Freeze the current version in enterprise environments.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting. This guarantees that
+only internally vetted versions are installed.
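+
+A sketch of the corresponding `admin-settings.json` entry, using the `disableUpdate` key listed below (values are illustrative):
+
+```json
+{
+  "configurationFileVersion": 2,
+  "disableUpdate": {
+    "locked": true,
+    "value": true
+  }
+}
+```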
+
+- **Configure this setting with:**
+  - Settings Management: `disableUpdate` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Disable update** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Always download updates
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Automatically download Docker Desktop updates when available.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Manage auto update behavior.
+- **Configure this setting with:**
+  - **Software updates** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: **Disable updates** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+## Extensions
+
+### Enable Docker extensions
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable or disable Docker Extensions.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Control access to the Extensions Marketplace and installed
+extensions.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting. This prevents
+third-party or unvetted plugins from being installed.
+
+- **Configure this setting with:**
+  - **Extensions** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `extensionsEnabled` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+  - Settings Management: **Allow Extensions** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md)
+
+### Allow only extensions distributed through the Docker Marketplace
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Restrict Docker Desktop to only run Marketplace extensions.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Prevent running third-party or local extensions.
+- **Configure this setting with:**
+  - **Extensions** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Show Docker Extensions system containers
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Show system containers used by Docker Extensions in the container list.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Help developers troubleshoot or view extension system containers.
+- **Configure this setting with:**
+  - **Extensions** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+## Beta features
+
+> [!IMPORTANT]
+>
+> For Docker Desktop versions 4.41 and earlier, these settings lived under the **Experimental features** tab on the **Features in development** page.
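+
+Beta toggles that support Settings Management can be locked centrally like any other setting. For example, a sketch that turns off the AI-related beta features described below, using the `enableDockerAI` and `enableDockerMCPToolkit` keys (values are illustrative):
+
+```json
+{
+  "configurationFileVersion": 2,
+  "enableDockerAI": { "locked": true, "value": false },
+  "enableDockerMCPToolkit": { "locked": true, "value": false }
+}
+```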
+
+### Enable Docker AI
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker AI features in the Docker Desktop experience.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enable or disable AI features like "Ask Gordon".
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableDockerAI` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Enable Docker Model Runner
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker Model Runner features in Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enable or disable Docker Model Runner features.
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableInference` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+#### Enable host-side TCP support
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Expose Docker Model Runner on a host TCP port.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Let host processes connect to Docker Model Runner over TCP.
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableInferenceTCP` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+> [!NOTE]
+>
+> This setting requires the Docker Model Runner setting to be enabled first.
+
+##### Port
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `12434`       | Integer         | Integer |
+
+- **Description:** Specifies the exposed TCP port.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Connect to the Model Runner via TCP.
+- **Configure this setting with:**
+  - **Beta features** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableInferenceTCP` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+##### CORS allowed origins
+
+| Default value | Accepted values                                                                  | Format |
+|---------------|----------------------------------------------------------------------------------|--------|
+| Empty string  | Empty string to deny all, `*` to accept all, or a list of comma-separated values | String |
+
+- **Description:** Specifies the allowed CORS origins.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Integration with a web app.
+- **Configure this setting with:**
+  - **Beta features** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableInferenceCORS` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Enable Docker MCP Toolkit
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable [Docker MCP Toolkit](/manuals/ai/mcp-catalog-and-toolkit/_index.md) in Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `enableDockerMCPToolkit` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Enable Wasm
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable [Wasm](/manuals/desktop/features/wasm.md) to run Wasm workloads.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Enable Compose Bridge
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable [Compose Bridge](/manuals/compose/bridge/_index.md).
+- **OS:** {{< badge color=blue text="All" >}}
+- **Configure this setting with:**
+  - **Beta** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+## Notifications
+
+### Status updates on tasks and processes
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Display general informational messages inside Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Customize in-app communication visibility.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Recommendations from Docker
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Display promotional announcements and banners inside Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Control exposure to Docker news and feature promotion.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Docker announcements
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Display general announcements inside Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enable or suppress Docker-wide announcements in the GUI.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Docker surveys
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Display notifications inviting users to participate in surveys.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enable or disable in-product survey prompts.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Docker Scout notification pop-ups
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker Scout pop-ups inside Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Show or hide vulnerability scan notifications.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Docker Scout OS notifications
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable Docker Scout notifications through the operating system.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Push Scout updates via the system notification center.
+- **Configure this setting with:**
+  - **Notifications** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+## Advanced
+
+### Configure installation of Docker CLI
+
+| Default value | Accepted values | Format |
+|---------------|-----------------|--------|
+| `system`      | File path       | String |
+
+- **Description:** Install location for Docker CLI binaries.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Customize CLI install location for compliance or tooling.
+- **Configure this setting with:**
+  - **Advanced** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+### Allow the default Docker socket to be used
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** By default, Enhanced Container Isolation blocks bind-mounting
+the Docker Engine socket into containers
+(for example, `docker run -v /var/run/docker.sock:/var/run/docker.sock ...`). This setting lets
+you relax that restriction in a controlled way. See the ECI configuration documentation for more information.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Allow containers to access the Docker socket for scenarios like
+Docker-in-Docker or containerized CI agents.
+- **Configure this setting with:**
+  - **Advanced** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+  - Settings Management: `dockerSocketMount` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Allow privileged port mapping
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `true`        | `true`, `false` | Boolean |
+
+- **Description:** Starts the privileged helper process, which binds privileged ports between 1 and 1024.
+- **OS:** {{< badge color=blue text="Mac only" >}}
+- **Use case:** Enforce elevated privileges for networking support.
+- **Configure this setting with:**
+  - **Advanced** settings in [Docker Desktop GUI](/manuals/desktop/settings-and-maintenance/settings.md)
+
+## Settings not available in the Docker Desktop GUI
+
+The following settings aren’t shown in the Docker Desktop GUI. You can only configure them using Settings Management with the Admin Console or the `admin-settings.json` file.
+
+### Block `docker load`
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Prevent users from loading local Docker images using the `docker load` command.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Enforce image provenance by restricting local image imports.
+
+> [!NOTE]
+>
+> In hardened environments, enable and lock this setting. This forces all images
+to come from your secure, scanned registry.
+
+- **Configure this setting with:**
+  - Settings Management: `blockDockerLoad` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Expose Docker API on TCP 2375
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Exposes the Docker API over an unauthenticated TCP socket on port 2375. Only recommended for isolated and protected environments.
+- **OS:** {{< badge color=blue text="Windows only" >}}
+- **Use case:** Required for legacy integrations or environments without named pipe support.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting. This ensures the
+Docker API is only reachable via the secure internal socket.
+
+- **Configure this setting with:**
+  - Settings Management: `exposeDockerAPIOnTCP2375` in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Air-gapped container proxy
+
+| Default value | Accepted values | Format      |
+| ------------- | --------------- | ----------- |
+| See example   | Object          | JSON object |
+
+- **Description:** Configure a manual HTTP or HTTPS proxy for containers. Useful in air-gapped environments where containers need restricted access.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Redirect or block container networking to comply with offline or secured network environments.
+- **Configure this setting with:**
+  - Settings Management: `containersProxy` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+#### Example
+
+```json
+"containersProxy": {
+  "locked": true,
+  "mode": "manual",
+  "http": "",
+  "https": "",
+  "exclude": [],
+  "pac": "",
+  "transparentPorts": ""
+}
+```
+
+### Docker socket access control (ECI exceptions)
+
+| Default value | Accepted values | Format      |
+| ------------- | --------------- | ----------- |
+| -             | Object          | JSON object |
+
+- **Description:** Allow specific images or commands to use the Docker socket when Enhanced Container Isolation is enabled.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Support tools like Testcontainers or LocalStack that need Docker socket access while maintaining secure defaults.
+- **Configure this setting with:**
+  - Settings Management: `enhancedContainerIsolation` > `dockerSocketMount` in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+#### Example
+
+```json
+"enhancedContainerIsolation": {
+  "locked": true,
+  "value": true,
+  "dockerSocketMount": {
+    "imageList": {
+      "images": [
+        "docker.io/localstack/localstack:*",
+        "docker.io/testcontainers/ryuk:*"
+      ]
+    },
+    "commandList": {
+      "type": "deny",
+      "commands": ["push"]
+    }
+  }
+}
+```
+
+### Allow beta features
+
+| Default value | Accepted values | Format  |
+|---------------|-----------------|---------|
+| `false`       | `true`, `false` | Boolean |
+
+- **Description:** Enable access to beta features in Docker Desktop.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Give developers early access to features that are in public beta.
+
+> [!NOTE]
+>
+> In hardened environments, disable and lock this setting.
+
+- **Configure this setting with:**
+  - Settings Management: `allowBetaFeatures` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### Docker daemon options (Linux or Windows)
+
+| Default value | Accepted values | Format           |
+|---------------|-----------------|------------------|
+| `{}`          | JSON object     | Stringified JSON |
+
+- **Description:** Override the Docker daemon configuration used in Linux or Windows containers.
+- **OS:** {{< badge color=blue text="All" >}}
+- **Use case:** Configure low-level Docker daemon options (for example, logging or storage drivers) without editing the local config files.
+
+> [!NOTE]
+>
+> In hardened environments, provide a vetted JSON config and lock it so no
+overrides are possible.
+
+- **Configure this setting with:**
+  - Settings Management: `linuxVM.dockerDaemonOptions` or `windowsContainers.dockerDaemonOptions` in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md)
+
+### VPNKit CIDR
+
+| Default value     | Accepted values | Format |
+|-------------------|-----------------|--------|
+| `192.168.65.0/24` | CIDR notation   | String |
+
+- **Description:** Set the subnet used for internal VPNKit DHCP/DNS services.
+- **OS:** {{< badge color=blue text="Mac only" >}}
+- **Use case:** Prevent IP conflicts in environments with overlapping subnets.
+
+> [!NOTE]
+>
+> In hardened environments, lock this setting to an approved, non-conflicting CIDR.
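+
+A sketch of the corresponding `admin-settings.json` entry, using the `vpnkitCIDR` key listed below (the CIDR shown is an arbitrary, illustrative range):
+
+```json
+{
+  "configurationFileVersion": 2,
+  "vpnkitCIDR": {
+    "locked": true,
+    "value": "10.200.0.0/24"
+  }
+}
+```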
+ +- **Configure this setting with:** + - Settings Management: `vpnkitCIDR` setting in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) + - Settings Management: **VPN Kit CIDR** setting in the [Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) + +### Enable Kerberos and NTLM authentication + +| Default value | Accepted values | Format | +|---------------|-----------------|--------| +| `false` | `true`, `false` | Boolean | + +- **Description:** Enables Kerberos and NTLM proxy authentication for enterprise environments. +- **OS:** {{< badge color=blue text="All" >}} +- **Use case:** Allow users to authenticate with enterprise proxy servers that require Kerberos or NTLM. +- **Configure this setting with:** + - Settings Management: `proxy.enableKerberosNtlm` in the [`admin-settings.json` file](/manuals/enterprise/security/hardened-desktop/settings-management/configure-json-file.md) diff --git a/content/manuals/security/images/enforce-sign-in.png b/content/manuals/enterprise/security/images/enforce-sign-in.png similarity index 100% rename from content/manuals/security/images/enforce-sign-in.png rename to content/manuals/enterprise/security/images/enforce-sign-in.png diff --git a/content/manuals/security/images/jit-disabled-flow.svg b/content/manuals/enterprise/security/images/jit-disabled-flow.svg similarity index 100% rename from content/manuals/security/images/jit-disabled-flow.svg rename to content/manuals/enterprise/security/images/jit-disabled-flow.svg diff --git a/content/manuals/security/images/jit-enabled-flow.svg b/content/manuals/enterprise/security/images/jit-enabled-flow.svg similarity index 100% rename from content/manuals/security/images/jit-enabled-flow.svg rename to content/manuals/enterprise/security/images/jit-enabled-flow.svg diff --git a/content/manuals/security/images/roles-and-permissions-member-editor-roles.png b/content/manuals/enterprise/security/images/roles-and-permissions-member-editor-roles.png similarity index 100% rename from content/manuals/security/images/roles-and-permissions-member-editor-roles.png rename to content/manuals/enterprise/security/images/roles-and-permissions-member-editor-roles.png diff --git a/content/manuals/security/for-admins/provisioning/_index.md b/content/manuals/enterprise/security/provisioning/_index.md similarity index 86% rename from content/manuals/security/for-admins/provisioning/_index.md rename to content/manuals/enterprise/security/provisioning/_index.md index 1668e9d1b9e4..8d4eca3c8e5d 100644 --- a/content/manuals/security/for-admins/provisioning/_index.md +++ b/content/manuals/enterprise/security/provisioning/_index.md @@ -4,6 +4,8 @@ keywords: provision users, provisioning, JIT, SCIM, group mapping, sso, docker h title: Provision users linkTitle: Provision weight: 20 +aliases: + - /security/for-admins/provisioning/ --- {{< summary-bar feature_name="SSO" >}} @@ -38,6 +40,10 @@ When a user signs in through SSO, Docker obtains several attributes from your Id - **Docker Org**: Optional. Specifies the organization the user belongs to - **Docker Team**: Optional. Defines the team the user belongs to within the organization - **Docker Role**: Optional. Determines the user's permission within Docker +- **Docker session minutes**: Optional. Sets the duration of a user’s session before they must re-authenticate with their identity provider (IdP). The value must be a positive integer greater than 0. 
+If this attribute is not provided, by default:
+  - Docker Desktop signs you out after 90 days, or 30 days of inactivity.
+  - Docker Hub and Docker Home sign you out after 24 hours.
 
 If your organization uses SAML for SSO, Docker retrieves these attributes from the SAML assertion message. Keep in mind that different IdPs may use different names for these attributes. The following reference table outlines possible SAML attributes used by Docker:
@@ -49,10 +55,11 @@ If your organization uses SAML for SSO, Docker retrieves these attributes from t
 | Docker Org (optional) | `dockerOrg` |
 | Docker Team (optional) | `dockerTeam` |
 | Docker Role (optional) | `dockerRole` |
+| Docker session minutes (optional) | `dockerSessionMinutes`, must be a positive integer > 0 |
 
 ## What's next?
 
 Review the provisioning method guides for steps on configuring provisioning methods:
-- [JIT](/manuals/security/for-admins/provisioning/just-in-time.md)
-- [SCIM](/manuals/security/for-admins/provisioning/scim.md)
-- [Group mapping](/manuals/security/for-admins/provisioning/group-mapping.md)
\ No newline at end of file
+- [JIT](just-in-time.md)
+- [SCIM](scim.md)
+- [Group mapping](group-mapping.md)
\ No newline at end of file
diff --git a/content/manuals/security/for-admins/provisioning/group-mapping.md b/content/manuals/enterprise/security/provisioning/group-mapping.md
similarity index 99%
rename from content/manuals/security/for-admins/provisioning/group-mapping.md
rename to content/manuals/enterprise/security/provisioning/group-mapping.md
index 595087318200..07aedb379e2b 100644
--- a/content/manuals/security/for-admins/provisioning/group-mapping.md
+++ b/content/manuals/enterprise/security/provisioning/group-mapping.md
@@ -7,6 +7,7 @@ aliases:
 - /admin/organization/security-settings/group-mapping/
 - /docker-hub/group-mapping/
 - /security/for-admins/group-mapping/
+- /security/for-admins/provisioning/group-mapping/
 weight: 40
 ---
 
diff --git a/content/manuals/security/for-admins/provisioning/just-in-time.md b/content/manuals/enterprise/security/provisioning/just-in-time.md
similarity index 88%
rename from content/manuals/security/for-admins/provisioning/just-in-time.md
rename to content/manuals/enterprise/security/provisioning/just-in-time.md
index 597a636ae80a..1d1bc6ad65af 100644
--- a/content/manuals/security/for-admins/provisioning/just-in-time.md
+++ b/content/manuals/enterprise/security/provisioning/just-in-time.md
@@ -3,6 +3,8 @@ description: Learn how Just-in-Time provisioning works with your SSO connection.
 keywords: user provisioning, just-in-time provisioning, JIT, autoprovision, Docker Hub, Docker Admin, admin, security
 title: Just-in-Time provisioning
 linkTitle: Just-in-Time
+aliases:
+  - /security/for-admins/provisioning/just-in-time/
 ---
 
 {{< summary-bar feature_name="SSO" >}}
@@ -30,7 +32,7 @@ When a user signs in with SSO and your SSO configuration has JIT provisioning en
 
 The following graphic provides an overview of SSO authentication with JIT enabled:
 
- ![JIT provisioning enabled](../../images/jit-enabled-flow.svg)
+ ![JIT provisioning enabled](../images/jit-enabled-flow.svg)
 
 ## SSO authentication with JIT provisioning disabled
 
 When JIT provisioning is disabled in your SSO connection, the following actions
 - Invitation found: If the user is a member of the organization or has a pending invitation, sign-in is successful, and the invitation is automatically accepted.
 - No invitation found: If the user is not a member of the organization and has no pending invitation, the sign-in fails, and an `Access denied` error appears. The user must contact an administrator to be invited to the organization.
 
-With JIT disabled, group mapping is only available if you have [SCIM enabled](/security/for-admins/provisioning/scim/#enable-scim-in-docker). If SCIM is not enabled, users won't be auto-provisioned to groups.
+With JIT disabled, group mapping is only available if you have [SCIM enabled](scim/#enable-scim-in-docker). If SCIM is not enabled, users won't be auto-provisioned to groups.
 
 The following graphic provides an overview of SSO authentication with JIT disabled:
 
-![JIT provisioning disabled](../../images/jit-disabled-flow.svg)
+![JIT provisioning disabled](../images/jit-disabled-flow.svg)
 
 ## Disable JIT provisioning
 
 You may want to disable JIT provisioning for reasons such as the following:
@@ -65,7 +67,7 @@ Users are provisioned with JIT by default. If you enable SCIM, you can disable JIT:
-1. In the [Admin Console](https://app.docker.com/admin), select your organization.
-2. Select **SSO and SCIM**.
-3. In the SSO connections table, select the **Action** icon and then **Disable JIT provisioning**.
-4. Select **Disable** to confirm.
+1. In [Docker Home](https://app.docker.com/), select your organization.
+1. Select **Admin Console**, then **SSO and SCIM**.
+1. In the SSO connections table, select the **Action** icon and then **Disable JIT provisioning**.
+1. Select **Disable** to confirm.
diff --git a/content/manuals/enterprise/security/provisioning/scim.md b/content/manuals/enterprise/security/provisioning/scim.md
new file mode 100644
index 000000000000..1b2c36d3355e
--- /dev/null
+++ b/content/manuals/enterprise/security/provisioning/scim.md
@@ -0,0 +1,414 @@
+---
+keywords: SCIM, SSO, user provisioning, de-provisioning, role mapping, assign users
+title: SCIM provisioning
+linkTitle: SCIM
+description: Learn how System for Cross-domain Identity Management works and how to set it up.
+aliases:
+  - /security/for-admins/scim/
+  - /docker-hub/scim/
+  - /security/for-admins/provisioning/scim/
+weight: 30
+---
+
+{{< summary-bar feature_name="SSO" >}}
+
+System for Cross-domain Identity Management (SCIM) is available for Docker
+Business customers. This guide provides an overview of SCIM provisioning.
+
+## How SCIM works
+
+SCIM automates user provisioning and de-provisioning for Docker through your
+identity provider (IdP). After you enable SCIM, any user assigned to your
+Docker application in your IdP is automatically provisioned and added to your
+Docker organization. When a user is removed from the Docker application in your
+IdP, SCIM deactivates and removes them from your Docker organization.
+
+In addition to provisioning and removal, SCIM also syncs profile updates, like
+name changes, made in your IdP. You can use SCIM alongside Docker’s default
+Just-in-Time (JIT) provisioning or on its own with JIT disabled.
+
+SCIM automates:
+
+- Creating users
+- Updating user profiles
+- Removing and deactivating users
+- Re-activating users
+- Group mapping
+
+> [!NOTE]
+>
+> SCIM only manages users provisioned through your IdP after SCIM is enabled.
+It cannot remove users who were manually added to your Docker organization
+before SCIM was set up.
+>
+> To remove those users, delete them manually from your Docker organization.
+For more information, see [Manage organization members](/manuals/admin/organization/members.md).
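+
+Concretely, SCIM syncs standard SCIM 2.0 user resources between your IdP and Docker. A hypothetical payload (the name and email are placeholders), using the attributes covered in the next section and the Docker extension namespace defined under role mapping, might look like:
+
+```json
+{
+  "schemas": [
+    "urn:ietf:params:scim:schemas:core:2.0:User",
+    "urn:ietf:params:scim:schemas:extension:docker:2.0:User"
+  ],
+  "userName": "jane.doe@example.com",
+  "name": {
+    "givenName": "Jane",
+    "familyName": "Doe"
+  },
+  "active": true,
+  "urn:ietf:params:scim:schemas:extension:docker:2.0:User": {
+    "dockerRole": "member"
+  }
+}
+```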
+
+## Supported attributes
+
+SCIM uses attributes (for example, name and email) to sync user information between your
+IdP and Docker. Properly mapping these attributes in your IdP ensures that user
+provisioning works smoothly and prevents issues like duplicate user accounts
+when using single sign-on (SSO).
+
+Docker supports the following SCIM attributes:
+
+| Attribute       | Description                                                                        |
+|:----------------|:-----------------------------------------------------------------------------------|
+| userName        | User’s primary email address, used as the unique identifier                        |
+| name.givenName  | User’s first name                                                                  |
+| name.familyName | User’s surname                                                                     |
+| active          | Indicates if a user is enabled or disabled, set to `false` to de-provision a user  |
+
+For additional details about supported attributes and SCIM, see [Docker Hub API SCIM reference](/reference/api/hub/latest/#tag/scim).
+
+> [!IMPORTANT]
+>
+> By default, Docker uses Just-in-Time (JIT) provisioning for SSO. If SCIM is
+enabled, JIT values still take precedence and will overwrite attribute values
+set by SCIM. To avoid conflicts, make sure your JIT attribute values match your
+SCIM values.
+>
+> Alternatively, you can disable JIT provisioning to rely solely on SCIM.
+For details, see [Just-in-Time](just-in-time.md).
+
+## Prerequisites
+
+- You've [set up SSO](../single-sign-on/_index.md)
+with Docker and verified your domain.
+- You have access to your identity provider's administrator portal with
+permission to create and manage applications.
+
+## Enable SCIM in Docker
+
+You must [configure SSO](../single-sign-on/configure/_index.md) before you enable SCIM. Enforcing SSO isn't required to use SCIM.
+
+{{< tabs >}}
+{{< tab name="Admin Console" >}}
+
+{{% admin-scim product="admin" %}}
+
+{{< /tab >}}
+{{< tab name="Docker Hub" >}}
+
+{{% include "hub-org-management.md" %}}
+
+{{% admin-scim %}}
+
+{{< /tab >}}
+{{< /tabs >}}
+
+## Enable SCIM in your IdP
+
+The user interface for your IdP might differ slightly from the following steps. You can refer to the documentation for your IdP to verify. For additional details, see the documentation for your IdP:
+
+- [Okta](https://help.okta.com/en-us/Content/Topics/Apps/Apps_App_Integration_Wizard_SCIM.htm)
+- [Entra ID/Azure AD SAML 2.0](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/user-provisioning)
+
+> [!NOTE]
+>
+> Microsoft does not currently support SCIM and OIDC in the same non-gallery
+application in Entra ID. This guide provides a verified workaround using a
+separate non-gallery app for SCIM provisioning. While Microsoft does not
+officially document this setup, it is widely used and supported in practice.
+
+{{< tabs >}}
+{{< tab name="Okta" >}}
+
+### Step one: Enable SCIM
+
+1. Sign in to Okta and select **Admin** to open the admin portal.
+1. Open the application you created when you configured your SSO connection.
+1. On the application page, select the **General** tab, then **Edit App Settings**.
+1. Enable SCIM provisioning, then select **Save**.
+1. Now you can access the **Provisioning** tab in Okta. Navigate to this tab, then select **Edit SCIM Connection**.
+1. To configure SCIM in Okta, set up your connection using the following values and settings:
To configure SCIM in Okta, set up your connection using the following values and settings:
   - SCIM Base URL: SCIM connector base URL (copied from Docker Hub)
   - Unique identifier field for users: `email`
   - Supported provisioning actions: **Push New Users** and **Push Profile Updates**
   - Authentication Mode: HTTP Header
   - SCIM Bearer Token: HTTP Header Authorization Bearer Token (copied from Docker Hub)
1. Select **Test Connector Configuration**.
1. Review the test results and select **Save**.

### Step two: Enable synchronization

1. In Okta, select **Provisioning**.
1. Select **To App**, then **Edit**.
1. Enable **Create Users**, **Update User Attributes**, and **Deactivate Users**.
1. Select **Save**.
1. Remove unnecessary mappings. Keep only the following mappings:
   - Username
   - Given name
   - Family name
   - Email

{{< /tab >}}
{{< tab name="Entra ID (OIDC)" >}}

Microsoft doesn't support SCIM and OIDC in the same non-gallery application.
You must create a second non-gallery application in Entra ID for SCIM
provisioning.

### Step one: Create a separate SCIM app

1. In the Azure Portal, go to **Microsoft Entra ID** > **Enterprise Applications** >
**New application**.
1. Select **Create your own application**.
1. Name your application and choose **Integrate any other application you don't find in the gallery**.
1. Select **Create**.

### Step two: Configure SCIM provisioning

1. In your new SCIM application, go to **Provisioning** > **Get started**.
1. Set **Provisioning Mode** to **Automatic**.
1. Under **Admin Credentials**:
   - **Tenant URL**: Paste the **SCIM Base URL** from Docker.
   - **Secret Token**: Paste the **SCIM API token** from Docker.
1. Select **Test Connection** to verify.
1. Select **Save** to store credentials.

Next, [set up role mapping](#set-up-role-mapping).

{{< /tab >}}
{{< tab name="Entra ID (SAML 2.0)" >}}

### Configure SCIM provisioning

1. In the Azure Portal, go to **Microsoft Entra ID** > **Enterprise Applications**,
and select your Docker SAML app.
1. Select **Provisioning** > **Get started**.
1. Set **Provisioning Mode** to **Automatic**.
1. Under **Admin Credentials**:
   - **Tenant URL**: Paste the **SCIM Base URL** from Docker.
   - **Secret Token**: Paste the **SCIM API token** from Docker.
1. Select **Test Connection** to verify.
1. Select **Save** to store credentials.

Next, [set up role mapping](#set-up-role-mapping).

{{< /tab >}}
{{< /tabs >}}

## Set up role mapping

You can assign [Docker roles](../roles-and-permissions.md) to
users by adding optional SCIM attributes in your IdP. These attributes override
default role and team values set in your SSO configuration.

> [!NOTE]
>
> Role mappings are supported for both SCIM and Just-in-Time (JIT)
provisioning. For JIT, role mapping applies only when the user is first
provisioned.

The following table lists the supported optional user-level attributes:

| Attribute | Possible values | Notes |
| --------- | --------------- | ----- |
| `dockerRole` | `member`, `editor`, or `owner` | If not set, the user defaults to the `member` role. Setting this attribute overrides the default. For role definitions, see [Roles and permissions](../roles-and-permissions.md). |
| `dockerOrg` | Docker `organizationName` (for example, `moby`) | Overrides the default organization configured in your SSO connection. If unset, the user is provisioned to the default organization. If `dockerOrg` and `dockerTeam` are both set, the user is provisioned to the team within the specified organization. |
| `dockerTeam` | Docker `teamName` (for example, `developers`) | Provisions the user to the specified team in the default or specified organization. If the team doesn't exist, it's automatically created. You can still use [group mapping](group-mapping.md) to assign users to multiple teams across organizations. |

The external namespace used for these attributes is: `urn:ietf:params:scim:schemas:extension:docker:2.0:User`.
This value is required in your IdP when creating custom SCIM attributes for Docker.
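As an illustration, the following is a hedged sketch of how these attributes might appear on a SCIM user resource, using the standard SCIM 2.0 extension-schema layout. All values are example placeholders, and the exact payload your IdP sends depends on its SCIM implementation.

```json
{
  "schemas": [
    "urn:ietf:params:scim:schemas:core:2.0:User",
    "urn:ietf:params:scim:schemas:extension:docker:2.0:User"
  ],
  "userName": "jane@example.com",
  "urn:ietf:params:scim:schemas:extension:docker:2.0:User": {
    "dockerRole": "editor",
    "dockerOrg": "moby",
    "dockerTeam": "developers"
  }
}
```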
{{< tabs >}}
{{< tab name="Okta" >}}

### Step one: Set up role mapping in Okta

1. Set up [SSO](../single-sign-on/configure/_index.md) and SCIM first.
1. In the Okta admin portal, go to **Directory**, select **Profile Editor**, and then **User (Default)**.
1. Select **Add Attribute** and configure the values for the role, organization, or team you want to add. Exact naming isn't required.
1. Return to the **Profile Editor** and select your application.
1. Select **Add Attribute** and enter the required values. The **External Name** and **External Namespace** must be exact. The external name values for organization, team, and role mapping are `dockerOrg`, `dockerTeam`, and `dockerRole` respectively, as listed in the previous table. The external namespace is the same for all of them: `urn:ietf:params:scim:schemas:extension:docker:2.0:User`.
1. After creating the attributes, navigate to the top of the page and select **Mappings**, then **Okta User to YOUR APP**.
1. Go to the newly created attributes and map the variable names to the external names, then select **Save Mappings**. If you're using JIT provisioning, continue to the following steps.
1. Navigate to **Applications** and select **YOUR APP**.
1. Select **General**, then **SAML Settings**, and **Edit**.
1. Select **Step 2** and configure the mapping from the user attribute to the Docker variables.

### Step two: Assign roles by user

1. In the Okta Admin portal, select **Directory**, then **People**.
1. Select **Profile**, then **Edit**.
1. Select **Attributes** and update the attributes to the desired values.

### Step three: Assign roles by group

1. In the Okta Admin portal, select **Directory**, then **People**.
1. Select **YOUR GROUP**, then **Applications**.
1. Open **YOUR APPLICATION** and select the **Edit** icon.
1. Update the attributes to the desired values.

If a user doesn't already have attributes set up, users who are added to the group inherit these attributes upon provisioning.

{{< /tab >}}
{{< tab name="Entra ID/Azure AD (SAML 2.0 and OIDC)" >}}

### Step one: Configure attribute mappings

1. Complete the [SCIM provisioning setup](#enable-scim-in-docker).
1. In the Azure Portal, open **Microsoft Entra ID** > **Enterprise Applications**,
and select your SCIM application.
1. Go to **Provisioning** > **Mappings** > **Provision Azure Active Directory Users**.
1. Add or update the following mappings:
   - `userPrincipalName` -> `userName`
   - `mail` -> `emails.value`
   - Optional. Map `dockerRole`, `dockerOrg`, or `dockerTeam` using one of the
   [mapping methods](#step-two-choose-a-role-mapping-method).
1. Remove any unsupported attributes to prevent sync errors.
1. Optional. Go to **Mappings** > **Provision Azure Active Directory Groups**:
   - If group provisioning causes errors, set **Enabled** to **No**.
   - If enabling, test group mappings carefully.
1. Select **Save** to apply mappings.

### Step two: Choose a role mapping method

You can map `dockerRole`, `dockerOrg`, or `dockerTeam` using one of the following
methods:

#### Expression mapping

Use this method if you only need to assign Docker roles like `member`, `editor`,
or `owner`.

1. In the **Edit Attribute** view, set the mapping type to **Expression**.
1. In the **Expression** field:
   1. If your App Roles match Docker roles exactly, use `SingleAppRoleAssignment([appRoleAssignments])`.
   1. If they don't match, use a switch expression: `Switch(SingleAppRoleAssignment([appRoleAssignments]), "My Corp Admins", "owner", "My Corp Editors", "editor", "My Corp Users", "member")`
1. Set:
   - **Target attribute**: `urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerRole`
   - **Match objects using this attribute**: No
   - **Apply this mapping**: Always
1. Save your changes.

> [!WARNING]
>
> You can't use `dockerOrg` or `dockerTeam` with this method. Expression mapping
is only compatible with one attribute.

#### Direct mapping

Use this method if you need to map multiple attributes (for example, `dockerRole` and
`dockerTeam`).

1. For each Docker attribute, choose a unique Entra extension attribute (for example,
`extensionAttribute1` or `extensionAttribute2`).
1. In the **Edit Attribute** view:
   - Set mapping type to **Direct**.
   - Set **Source attribute** to your selected extension attribute (for example, `extensionAttribute1`).
   - Set **Target attribute** to one of:
     - `dockerRole: urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerRole`
     - `dockerOrg: urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerOrg`
     - `dockerTeam: urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerTeam`
   - Set **Apply this mapping** to **Always**.
1. Save your changes.

To assign values, you'll need to use the Microsoft Graph API.

### Step three: Assign users and groups

For either mapping method:

1. In the SCIM app, go to **Users and Groups** > **Add user/group**.
1. Select the users or groups to provision to Docker.
1. Select **Assign**.

If you're using expression mapping:

1. Go to **App registrations** > your SCIM app > **App Roles**.
1. Create App Roles that match Docker roles.
1. Assign users or groups to App Roles under **Users and Groups**.

If you're using direct mapping:

1. Go to [Microsoft Graph Explorer](https://developer.microsoft.com/en-us/graph/graph-explorer)
and sign in as a tenant admin.
1. Use Microsoft Graph API to assign attribute values. Example PATCH request:

```bash
PATCH https://graph.microsoft.com/v1.0/users/{user-id}
Content-Type: application/json

{
  "extensionAttribute1": "owner",
  "extensionAttribute2": "moby",
  "extensionAttribute3": "developers"
}
```

> [!NOTE]
>
> You must use a different extension attribute for each SCIM field.

{{< /tab >}}
{{< /tabs >}}

See the documentation for your IdP for additional details:

- [Okta](https://help.okta.com/en-us/Content/Topics/users-groups-profiles/usgp-add-custom-user-attributes.htm)
- [Entra ID/Azure AD](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/customize-application-attributes#provisioning-a-custom-extension-attribute-to-a-scim-compliant-application)

## Test SCIM provisioning

After completing role mapping, you can test the configuration manually.

{{< tabs >}}
{{< tab name="Okta" >}}

1. In the Okta admin portal, go to **Directory > People**.
1. Select a user you've assigned to your SCIM application.
1. Select **Provision User**.
1. Wait a few seconds, then check the Docker
[Admin Console](https://app.docker.com/admin) under **Members**.
1. If the user doesn't appear, review logs in **Reports > System Log** and
confirm SCIM settings in the app. You can also query the SCIM API directly, as
shown below.
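As a spot check outside the Okta UI, you can list the users Docker knows about through SCIM. This is a minimal sketch: `<SCIM Base URL>` and `<token>` are the SCIM connector base URL and API token from your Docker SCIM configuration, and it assumes the endpoint follows the standard SCIM 2.0 `/Users` listing.

```bash
# List SCIM-provisioned users; the response is standard SCIM 2.0 JSON.
curl -s -H "Authorization: Bearer <token>" "<SCIM Base URL>/Users"
```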
+

{{< /tab >}}
{{< tab name="Entra ID/Azure AD (OIDC and SAML 2.0)" >}}

1. In the Azure Portal, go to **Microsoft Entra ID** > **Enterprise Applications**,
and select your SCIM app.
1. Go to **Provisioning** > **Provision on demand**.
1. Select a user or group and choose **Provision**.
1. Confirm that the user appears in the Docker
[Admin Console](https://app.docker.com/admin) under **Members**.
1. If needed, check **Provisioning logs** for errors.

{{< /tab >}}
{{< /tabs >}}

## Disable SCIM

If you disable SCIM, any user provisioned through SCIM remains in the organization, but future changes for your users won't sync from your IdP. To de-provision a user after disabling SCIM, you must manually remove them from the organization.

{{< tabs >}}
{{< tab name="Admin Console" >}}

{{% admin-scim-disable product="admin" %}}

{{< /tab >}}
{{< tab name="Docker Hub" >}}

{{% include "hub-org-management.md" %}}

{{% admin-scim-disable %}}

{{< /tab >}}
{{< /tabs >}}

## More resources

The following videos demonstrate how to configure SCIM for your IdP:

- [Video: Configure SCIM with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=1314)
- [Video: Attribute mapping with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=1998)
- [Video: Configure SCIM with Entra ID/Azure AD](https://youtu.be/bGquA8qR9jU?feature=shared&t=1668)
- [Video: Attribute and group mapping with Entra ID/Azure AD](https://youtu.be/bGquA8qR9jU?feature=shared&t=2039)

Refer to the following troubleshooting guide if needed:

- [Troubleshoot provisioning](/manuals/enterprise/troubleshoot/troubleshoot-provisioning.md)
diff --git a/content/manuals/security/for-admins/roles-and-permissions.md b/content/manuals/enterprise/security/roles-and-permissions.md
similarity index 59%
rename from content/manuals/security/for-admins/roles-and-permissions.md
rename to content/manuals/enterprise/security/roles-and-permissions.md
index 7b5295b52597..3c79802a3b4c 100644
--- a/content/manuals/security/for-admins/roles-and-permissions.md
+++ b/content/manuals/enterprise/security/roles-and-permissions.md
@@ -1,30 +1,35 @@
---
-description: >
-  Use roles in your organization to control who has access to content,
-  registry, and organization management permissions.
+description: Use roles in your organization to control who has access to content, registry, and organization management permissions.
keywords: members, teams, organization, company, roles, access, docker hub, admin console, security
title: Roles and permissions
aliases:
- /docker-hub/roles-and-permissions/
+- /security/for-admins/roles-and-permissions/
weight: 40
---

{{< summary-bar feature_name="General admin" >}}

-Organization and company owners can assign roles to individuals giving them different permissions in the organization. This guide outlines Docker's organization roles and their permission scopes.
+This guide outlines Docker's organization roles and their permission scopes.

## Roles

-When you invite users to your organization, you assign them a role. A role is a collection of permissions. Roles define whether users can create repositories, pull images, create teams, and configure organization settings.
+When you invite users to your organization, you assign them a role. A role is a
+collection of permissions. Roles define whether users can create repositories,
+pull images, create teams, and configure organization settings.

The following roles are available to assign:

-- Member: Non-administrative role. 
Members can view other members that are in the same organization. -- Editor: Partial administrative access to the organization. Editors can create, edit, and delete repositories. They can also edit an existing team's access permissions. -- Organization owner: Full organization administrative access. Organization owners can manage organization repositories, teams, members, settings, and billing. -- Company owner: In addition to the permissions of an organization owner, company owners can configure settings for their associated organizations. +- Member: Non-administrative role. Members can view other members that are in +the same organization. +- Editor: Partial administrative access to the organization. Editors can +create, edit, and delete repositories. They can also edit an existing team's +access permissions. +- Owner: Full organization administrative access. Owners can manage organization +repositories, teams, members, settings, and billing. Owners can manage roles for members of an organization using Docker Hub or the Admin Console: + - Update a member role in [Docker Hub](/manuals/admin/organization/members.md#update-a-member-role) - Update an organization's members or company in the [Admin Console](/manuals/admin/company/users.md#update-a-member-role) - Learn more about [organizations and companies](/manuals/admin/_index.md) @@ -33,15 +38,18 @@ Owners can manage roles for members of an organization using Docker Hub or the A > [!NOTE] > -> Company owners have the same access as organization owners for all associated organizations. For more information, see [Company overview](/admin/company/). +> Company owners have the same access as owners for all associated organizations. For more information, see [Company overview](/admin/company/). The following sections describe the permissions for each role. ### Content and registry permissions -The following table outlines content and registry permissions for member, editor, and organization owner roles. These permissions and roles apply to the entire organization, including all the repositories in the namespace for the organization. +The following table outlines content and registry permissions for member, +editor, and owner roles. These permissions and roles apply to the entire +organization, including all the repositories in the namespace for the +organization. -| Permission | Member | Editor | Organization owner | +| Permission | Member | Editor | Owner | | :---------------------------------------------------- | :----- | :----- | :----------------- | | Explore images and extensions | ✅ | ✅ | ✅ | | Star, favorite, vote, and comment on content | ✅ | ✅ | ✅ | @@ -58,45 +66,52 @@ The following table outlines content and registry permissions for member, editor | View teams | ✅ | ✅ | ✅ | | Assign team permissions to repositories | ❌ | ✅ | ✅ | -When you add members to a team, you can manage their repository permissions. For team repository permissions, see [Create and manage a team permissions reference](/manuals/admin/organization/manage-a-team.md#permissions-reference). +When you add members to a team, you can manage their repository permissions. +For team repository permissions, see [Create and manage a team permissions reference](/manuals/admin/organization/manage-a-team.md#permissions-reference). -The following diagram provides an example of how permissions may work for a user. In this example, the first permission check is for the role: member or editor. 
Editors have administrative permissions for repositories across the namespace of the organization. Members may have administrative permissions for a repository if they're a member of a team that grants those permissions. +The following diagram provides an example of how permissions may work for a +user. In this example, the first permission check is for the role: member or +editor. Editors have administrative permissions for repositories across the +namespace of the organization. Members may have administrative permissions for +a repository if they're a member of a team that grants those permissions. ![User repository permissions within an organization](../images/roles-and-permissions-member-editor-roles.png) ### Organization management permissions -The following table outlines organization management permissions for member, editor, organization owner, and company owner roles. - -| Permission | Member | Editor | Organization owner | Company owner | -| :---------------------------------------------------------------- | :----- | :----- | :----------------- | :------------ | -| Create teams | ❌ | ❌ | ✅ | ✅ | -| Manage teams (including delete) | ❌ | ❌ | ✅ | ✅ | -| Configure the organization's settings (including linked services) | ❌ | ❌ | ✅ | ✅ | -| Add organizations to a company | ❌ | ❌ | ✅ | ✅ | -| Invite members | ❌ | ❌ | ✅ | ✅ | -| Manage members | ❌ | ❌ | ✅ | ✅ | -| Manage member roles and permissions | ❌ | ❌ | ✅ | ✅ | -| View member activity | ❌ | ❌ | ✅ | ✅ | -| Export and reporting | ❌ | ❌ | ✅ | ✅ | -| Image Access Management | ❌ | ❌ | ✅ | ✅ | -| Registry Access Management | ❌ | ❌ | ✅ | ✅ | -| Set up Single Sign-On (SSO) and SCIM | ❌ | ❌ | ✅ \* | ✅ | -| Require Docker Desktop sign-in | ❌ | ❌ | ✅ \* | ✅ | -| Manage billing information (for example, billing address) | ❌ | ❌ | ✅ | ✅ | -| Manage payment methods (for example, credit card or invoice) | ❌ | ❌ | ✅ | ✅ | -| View billing history | ❌ | ❌ | ✅ | ✅ | -| Manage subscriptions | ❌ | ❌ | ✅ | ✅ | -| Manage seats | ❌ | ❌ | ✅ | ✅ | -| Upgrade and downgrade plans | ❌ | ❌ | ✅ | ✅ | +The following table outlines organization management permissions for member, +editor, owner, and company owner roles. + +| Permission | Member | Editor | Owner | +| :---------------------------------------------------------------- | :----- | :----- | :----------------- | +| Create teams | ❌ | ❌ | ✅ | +| Manage teams (including delete) | ❌ | ❌ | ✅ | +| Configure the organization's settings (including linked services) | ❌ | ❌ | ✅ | +| Add organizations to a company | ❌ | ❌ | ✅ | +| Invite members | ❌ | ❌ | ✅ | +| Manage members | ❌ | ❌ | ✅ | +| Manage member roles and permissions | ❌ | ❌ | ✅ | +| View member activity | ❌ | ❌ | ✅ | +| Export and reporting | ❌ | ❌ | ✅ | +| Image Access Management | ❌ | ❌ | ✅ | +| Registry Access Management | ❌ | ❌ | ✅ | +| Set up Single Sign-On (SSO) and SCIM | ❌ | ❌ | ✅ \* | +| Require Docker Desktop sign-in | ❌ | ❌ | ✅ \* | +| Manage billing information (for example, billing address) | ❌ | ❌ | ✅ | +| Manage payment methods (for example, credit card or invoice) | ❌ | ❌ | ✅ | +| View billing history | ❌ | ❌ | ✅ | +| Manage subscriptions | ❌ | ❌ | ✅ | +| Manage seats | ❌ | ❌ | ✅ | +| Upgrade and downgrade plans | ❌ | ❌ | ✅ | _\* If not part of a company_ ### Docker Scout permissions -The following table outlines Docker Scout management permissions for member, editor, and organization owner roles. +The following table outlines Docker Scout management permissions for member, +editor, and owner roles. 
-| Permission | Member | Editor | Organization owner | +| Permission | Member | Editor | Owner | | :---------------------------------------------------- | :----- | :----- | :----------------- | | View and compare analysis results | ✅ | ✅ | ✅ | | Upload analysis records | ✅ | ✅ | ✅ | @@ -106,16 +121,13 @@ The following table outlines Docker Scout management permissions for member, edi ### Docker Build Cloud permissions -The following table outlines Docker Build Cloud management permissions for member, editor, and organization owner roles. +The following table outlines Docker Build Cloud management permissions for +member, editor, and owner roles. -| Permission | Member | Editor | Organization owner | +| Permission | Member | Editor | Owner | | ---------------------------- | :----- | :----- | :----------------- | -| Sign up for starter plan | ✅ | ✅ | ✅ | -| Use a cloud builder | ✅ \* | ✅ \* | ✅ \* | -| Manage seat allocation | ✅ | ✅ | ✅ | +| Use a cloud builder | ✅ | ✅ | ✅ | | Create and remove builders | ✅ | ✅ | ✅ | -| Buy seats or reduce seat cap | ❌ | ❌ | ✅ | +| Configure builder settings | ✅ | ✅ | ✅ | | Buy minutes | ❌ | ❌ | ✅ | | Manage subscription | ❌ | ❌ | ✅ | - -_\* Requires a Docker Build Cloud seat allocation_ diff --git a/content/manuals/security/for-admins/single-sign-on/_index.md b/content/manuals/enterprise/security/single-sign-on/_index.md similarity index 77% rename from content/manuals/security/for-admins/single-sign-on/_index.md rename to content/manuals/enterprise/security/single-sign-on/_index.md index 5977b7990a31..ba0cb4a254a9 100644 --- a/content/manuals/security/for-admins/single-sign-on/_index.md +++ b/content/manuals/enterprise/security/single-sign-on/_index.md @@ -7,12 +7,13 @@ aliases: - /single-sign-on/ - /admin/company/settings/sso/ - /admin/organization/security-settings/sso-management/ +- /security/for-admins/single-sign-on/ weight: 10 --- {{< summary-bar feature_name="SSO" >}} -Single sign-on (SSO) lets users access Docker by authenticating using their identity providers (IdPs). SSO is available for a whole company, and all associated organizations within that company, or an individual organization that has a Docker Business subscription. To upgrade your existing account to a Docker Business subscription, see [Upgrade your subscription](/subscription/upgrade/). +Single sign-on (SSO) lets users access Docker by authenticating using their identity providers (IdPs). SSO is available for a whole company, and all associated organizations within that company, or an individual organization that has a Docker Business subscription. To upgrade your existing account to a Docker Business subscription, see [Upgrade your subscription](/manuals/subscription/change.md). ## How SSO works @@ -25,13 +26,13 @@ The following diagram shows how SSO operates and is managed in Docker Hub and Do ## How to set it up SSO is configured using the following steps: -1. [Configure SSO](../single-sign-on/configure.md) by creating and verifying a domain in Docker. -2. [Create your SSO connection](../single-sign-on/connect.md) in Docker and your IdP. +1. [Configure SSO](configure.md) by creating and verifying a domain in Docker. +2. [Create your SSO connection](connect.md) in Docker and your IdP. 3. Cross-connect Docker and your IdP. 4. Test your connection. 5. Provision users. 6. Optional. [Enforce sign-in](../enforce-sign-in/_index.md). -7. [Manage your SSO configuration](../single-sign-on/manage.md). +7. [Manage your SSO configuration](manage.md). 
Once your SSO configuration is complete, a first-time user can sign in to Docker Hub or Docker Desktop using their company's domain email address. Once they sign in, they are added to your company, assigned to an organization, and if necessary, assigned to a team. @@ -40,12 +41,12 @@ Once your SSO configuration is complete, a first-time user can sign in to Docker Before configuring SSO, ensure you meet the following prerequisites: * Notify your company about the new SSO sign in procedures. * Verify that all users have Docker Desktop version 4.4.2 or later installed. -* If your organization is planning to [enforce SSO](/manuals/security/for-admins/single-sign-on/connect.md#optional-enforce-sso), members using the Docker CLI are required to [create a Personal Access Token (PAT)](/docker-hub/access-tokens/). The PAT will be used instead of their username and password. Docker plans to deprecate signing in to the CLI with a password in the future, so using a PAT will be required to prevent issues with authentication. For more details see the [security announcement](/security/security-announcements/#deprecation-of-password-logins-on-cli-when-sso-enforced). +* If your organization is planning to [enforce SSO](/manuals/enterprise/security/single-sign-on/connect.md#optional-enforce-sso), members using the Docker CLI are required to [create a Personal Access Token (PAT)](/docker-hub/access-tokens/). The PAT will be used instead of their username and password. Docker plans to deprecate signing in to the CLI with a password in the future, so using a PAT will be required to prevent issues with authentication. For more details see the [security announcement](/manuals/security/security-announcements.md#deprecation-of-password-logins-on-cli-when-sso-enforced). * Ensure all your Docker users have a valid user on your IdP with the same email address as their Unique Primary Identifier (UPN). * Confirm that all CI/CD pipelines have replaced their passwords with PATs. * For your service accounts, add your additional domains or enable it in your IdP. ## What's next? -- Start [configuring SSO](../../for-admins/single-sign-on/configure.md) in Docker -- Explore the [FAQs](../../../security/faqs/single-sign-on/_index.md) +- Start [configuring SSO](configure.md) in Docker +- Explore the [FAQs](/manuals/security/faqs/_index.md) diff --git a/content/manuals/enterprise/security/single-sign-on/configure.md b/content/manuals/enterprise/security/single-sign-on/configure.md new file mode 100644 index 000000000000..7630928572d0 --- /dev/null +++ b/content/manuals/enterprise/security/single-sign-on/configure.md @@ -0,0 +1,106 @@ +--- +description: Learn how to configure single sign-on for your organization or company. +keywords: configure, sso, docker hub, hub, docker admin, admin, security +title: Configure single sign-on +linkTitle: Configure +aliases: + - /docker-hub/domains/ + - /docker-hub/sso-connection/ + - /docker-hub/enforcing-sso/ + - /single-sign-on/configure/ + - /admin/company/settings/sso-configuration/ + - /admin/organization/security-settings/sso-configuration/ + - /security/for-admins/single-sign-on/configure/ +--- + +{{< summary-bar feature_name="SSO" >}} + +Get started creating a single sign-on (SSO) connection for your organization or company. This guide walks through the steps to add and verify the domains your members use to sign in to Docker. + +## Step one: Add your domain + +> [!NOTE] +> +> Docker supports multiple identity provider (IdP) configurations. 
With a multiple IdP configuration, one domain can be associated with more than one SSO identity provider.

{{< tabs >}}
{{< tab name="Admin Console" >}}

1. Sign in to [Docker Home](https://app.docker.com) and choose your
organization. Note that when an organization is part of a company, you must
select the company and configure the domain for the organization at the company level.
1. Select **Admin Console**, then **Domain management**.
1. Select **Add a domain**.
1. Enter your domain in the text box and select **Add domain**.
1. A pop-up modal prompts you with steps to verify your domain. Copy the **TXT Record Value**.

{{< /tab >}}
{{< tab name="Docker Hub" >}}

{{% include "hub-org-management.md" %}}

1. Sign in to [Docker Hub](https://hub.docker.com/).
1. Select **My Hub** and then your organization from the list.
1. On your organization page, select **Settings** and then **Security**.
1. Select **Add a domain**.
1. Enter your domain in the text box and select **Add domain**.
1. A pop-up modal prompts you with steps to verify your domain. Copy the **TXT Record Value**.

{{< /tab >}}
{{< /tabs >}}

## Step two: Verify your domain

Verifying your domain proves to Docker that you own it. To verify, add a TXT record with the value Docker provides to your Domain Name System (DNS) host. It can take up to 72 hours for DNS to recognize the change. Once the change is reflected in DNS, Docker automatically checks the record to confirm your ownership.

Use the **TXT Record Value** provided by Docker and follow the steps based on your DNS host. If your provider isn't listed, use the instructions for other providers.

> [!TIP]
>
> The record name field controls where the TXT record is applied in your domain, for example root or subdomain. In general, refer to the following tips for adding a record name:
>
> - Use `@` or leave the record name empty for root domains like `example.com`, depending on your provider.
> - Don't enter values like `docker`, `docker-verification`, `www`, or your domain name. These values might direct to the wrong place.
>
> Check your DNS provider's documentation to verify record name requirements.
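After you add the record with one of the following providers, you can check that it has propagated before selecting **Verify** in Docker. The following is a minimal sketch using the standard `dig` tool; replace `example.com` with your domain.

```bash
# Print the TXT records currently visible in public DNS for your domain.
dig +short TXT example.com
```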
{{< tabs >}}
{{< tab name="AWS Route 53" >}}

1. To add your TXT record to AWS, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html).
1. TXT record verification can take up to 72 hours. Once the record has propagated, return to the **Domain management** page of the [Admin Console](https://app.docker.com/admin) and select **Verify** next to your domain name.

{{< /tab >}}
{{< tab name="Google Cloud DNS" >}}

1. To add your TXT record to Google Cloud DNS, see [Verifying your domain with a TXT record](https://cloud.google.com/identity/docs/verify-domain-txt).
1. TXT record verification can take up to 72 hours. Once the record has propagated, return to the **Domain management** page of the [Admin Console](https://app.docker.com/admin) and select **Verify** next to your domain name.

{{< /tab >}}
{{< tab name="GoDaddy" >}}

1. To add your TXT record to GoDaddy, see [Add a TXT record](https://www.godaddy.com/help/add-a-txt-record-19232).
1. TXT record verification can take up to 72 hours. Once the record has propagated, return to the **Domain management** page of the [Admin Console](https://app.docker.com/admin) and select **Verify** next to your domain name.

{{< /tab >}}
{{< tab name="Other providers" >}}

1. Sign in to your domain host.
1. Add a TXT record to your DNS settings and save the record.
1. TXT record verification can take up to 72 hours. Once the record has propagated, return to the **Domain management** page of the [Admin Console](https://app.docker.com/admin) and select **Verify** next to your domain name.

{{< /tab >}}
{{< /tabs >}}

Once you have added and verified your domain, you are ready to create an SSO connection between Docker and your identity provider (IdP).

## More resources

The following videos walk through verifying your domain to create your SSO connection in Docker.

- [Video: Verify your domain for SSO with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=529)
- [Video: Verify your domain for SSO with Azure AD (OIDC)](https://youtu.be/bGquA8qR9jU?feature=shared&t=496)

## What's next?

[Connect Docker and your IdP](connect.md).
diff --git a/content/manuals/enterprise/security/single-sign-on/connect.md b/content/manuals/enterprise/security/single-sign-on/connect.md
new file mode 100644
index 000000000000..85f50dcde1fa
--- /dev/null
+++ b/content/manuals/enterprise/security/single-sign-on/connect.md
@@ -0,0 +1,252 @@
---
description: Learn how to complete your single sign-on connection and next steps for enabling SSO.
keywords: configure, sso, docker hub, hub, docker admin, admin, security
title: Create an SSO connection
linkTitle: Connect
aliases:
  - /security/for-admins/single-sign-on/connect/
---

{{< summary-bar feature_name="SSO" >}}

To create a single sign-on (SSO) connection, first set up the connection in Docker, then set it up in your identity provider (IdP). This guide provides steps for both.

> [!TIP]
>
> This guide requires copying and pasting values in both Docker and your IdP. To ensure a seamless connection process, complete all the steps in this guide in one session and keep separate browser windows open for both Docker and your IdP.

## Prerequisites

Make sure you have completed the following before you begin:

- Your domain is verified
- You have an account set up with an IdP
- You have completed the steps in the [Configure single sign-on](configure.md) guide

## Step one: Create an SSO connection in Docker

> [!NOTE]
>
> Before creating an SSO connection in Docker, you must verify at least one domain.

{{< tabs >}}
{{< tab name="Admin Console" >}}

1. Sign in to [Docker Home](https://app.docker.com) and choose your
organization. Note that when an organization is part of a company, you must
select the company and configure the domain for the organization at the company level.
1. Select **Admin Console**, then **SSO and SCIM**.
1. Select **Create Connection** and provide a name for the connection.
1. Select an authentication method, **SAML** or **Azure AD (OIDC)**.
1. Copy the following fields to add to your IdP:
   - Okta SAML: **Entity ID**, **ACS URL**
   - Azure OIDC: **Redirect URL**
1. Keep this window open so you can paste the connection information from your IdP here at the end of this guide.

{{< /tab >}}
{{< tab name="Docker Hub" >}}

{{% include "hub-org-management.md" %}}

1. Sign in to Docker Hub.
1. 
Select **My Hub** and then your organization from the list.
1. On your organization page, select **Settings** and then **Security**.
1. In the SSO connection table, select **Create Connection** and provide a name for the connection.
1. Select an authentication method, **SAML** or **Azure AD (OIDC)**.
1. Copy the following fields to add to your IdP:
   - Okta SAML: **Entity ID**, **ACS URL**
   - Azure OIDC: **Redirect URL**
1. Keep this window open so you can paste the connection information from your IdP here at the end of this guide.

{{< /tab >}}
{{< /tabs >}}

## Step two: Create an SSO connection in your IdP

The user interface for your IdP might differ slightly from the following steps. Refer to the documentation for your IdP to verify.

{{< tabs >}}
{{< tab name="Okta SAML" >}}

1. Sign in to your Okta account.
1. Select **Admin** to open the Okta Admin portal.
1. From the left-hand navigation, select **Applications**.
1. Select **Applications** and then **Create App Integration**.
1. Select **SAML 2.0** and then **Next**.
1. Enter "Docker Hub" as your **App Name**.
1. Optional. Upload a logo.
1. Select **Next**.
1. Enter the following values from Docker into their corresponding Okta fields:
   - Docker ACS URL: **Single Sign On URL**
   - Docker Entity ID: **Audience URI (SP Entity ID)**
1. Configure the following settings in Okta:
   - Name ID format: `EmailAddress`
   - Application username: `Email`
   - Update application on: `Create and update`
1. Optional. Add SAML attributes. See [SSO attributes](/manuals/enterprise/security/provisioning/_index.md#sso-attributes) for a table of SSO attributes.
1. Select **Next**.
1. Select the **This is an internal app that we have created** checkbox.
1. Select **Finish**.

{{< /tab >}}
{{< tab name="Entra ID SAML 2.0" >}}

1. Sign in to your Azure AD admin portal.
1. Select **Default Directory** and then **Add**.
1. Choose **Enterprise Application** and select **Create your own application**.
1. Enter "Docker" for the application name and select the **non-gallery** option.
1. After the application is created, go to **Single Sign-On** and select **SAML**.
1. Select **Edit** on the **Basic SAML configuration** section.
1. Enter the following values from Docker into their corresponding Azure fields:
   - Docker Entity ID: **Identifier**
   - Docker ACS URL: **Reply URL**
1. Optional. Add SAML attributes. See [SSO attributes](/manuals/enterprise/security/provisioning/_index.md#sso-attributes) for a table of SSO attributes.
1. Save the configuration.
1. From the **SAML Signing Certificate** section, download your **Certificate (Base64)**.

{{< /tab >}}
{{< tab name="Azure Connect (OIDC)" >}}

To create an Azure Connect (OIDC) connection, you must create an app registration, create client secrets, and configure API permissions for Docker:

### Create app registration

1. Sign in to your Azure AD admin portal.
1. Select **App Registration** and then **New Registration**.
1. Enter "Docker Hub SSO" or similar for the application name.
1. Under **Supported account types**, specify who can use this application or access the app.
1. In the **Redirect URI** section, select **Web** from the drop-down menu and paste the **Redirect URI** value from the Docker console into this field.
1. Select **Register** to register the app.
1. Copy the **Client ID** from the app's overview page. You need this information to continue configuring SSO in Docker.

### Create client secrets

1. 
Open your app in Azure AD and select **Certificates & secrets**.
1. Select **+ New client secret**.
1. Specify a description for the secret and set how long it can be used.
1. Select **Add** to continue.
1. Copy the secret **Value** field. You need this to continue configuring SSO in Docker.

### Configure API permissions

1. Open your app in Azure AD and navigate to your app settings.
1. Select **API permission** and then **Grant admin consent for [your tenant name]**.
1. Select **Yes** to confirm.
1. After confirming, select **Add a permission** and then **Delegated permissions**.
1. Search for `User.Read` and select this option.
1. Select **Add permissions** to confirm.
1. Verify admin consent was granted for each permission by checking the **Status** column.

{{< /tab >}}
{{< /tabs >}}

## Step three: Connect Docker and your IdP

After creating your connection in Docker and your IdP, you can cross-connect them to complete your SSO connection:

{{< tabs >}}
{{< tab name="Okta SAML" >}}

1. Open the app you created in Okta and select **View SAML setup instructions**.
1. Copy the following values from the Okta SAML setup instruction page:
   - **SAML Sign-in URL**
   - **x509 Certificate**

   > [!IMPORTANT]
   >
   > You must copy the entire contents of your **x509 Certificate**,
   including the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` lines.

1. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide.
1. Select **Next** to open the **Update single-sign on connection** page.
1. Paste your Okta **SAML Sign-in URL** and **x509 Certificate** values in Docker.
1. Select **Next**.
1. Optional. Select a default team to provision users to and select **Next**.
1. Verify your SSO connection details and select **Create Connection**.

{{< /tab >}}
{{< tab name="Entra ID SAML 2.0" >}}

1. Open your app in Azure AD.
1. Open your downloaded **Certificate (Base64)** in a text editor.
1. Copy the following values:
   - From Azure AD: **Login URL**
   - The contents of your **Certificate (Base64)** file from your text editor

   > [!IMPORTANT]
   >
   > You must copy the entire contents of your **Certificate (Base64)**,
   including the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` lines.

1. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide.
1. Paste your **Login URL** and **Certificate (Base64)** values in Docker.
1. Select **Next**.
1. Optional. Select a default team to provision users to and select **Next**.
1. Verify your SSO connection details and select **Create Connection**.

{{< /tab >}}
{{< tab name="Azure Connect (OIDC)" >}}

1. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide.
1. Paste the following values from Azure AD into Docker:
   - **Client ID**
   - **Client Secret**
   - **Azure AD Domain**
1. Select **Next**.
1. Optional. Select a default team to provision users to and select **Next**.
1. Verify your SSO connection details and select **Create Connection**.

{{< /tab >}}
{{< /tabs >}}

## Step four: Test your connection

After you've completed the SSO connection process in Docker, we recommend testing it:

1. Open an incognito browser.
1. Sign in to the Admin Console using your **domain email address**.
1. The browser redirects to your identity provider's sign-in page to authenticate. 
If you have [multiple IdPs](#optional-configure-multiple-idps), choose the sign-in option **Continue with SSO**.
1. Authenticate through your domain email instead of using your Docker ID.

You can also test your SSO connection through the command-line interface (CLI). If you want to test through the CLI, your users must have a personal access token (PAT).

## Optional: Configure multiple IdPs

Docker supports multiple IdP configurations. With multiple IdPs configured, one domain can be associated with multiple SSO identity providers. To configure multiple IdPs, repeat steps 1-4 in this guide for each IdP. Ensure each IdP configuration uses the same domain.

When a user signs in to a Docker organization that has multiple IdPs, on the sign-in page, they must choose the option **Continue with SSO**. This prompts them to choose their identity provider and authenticate through their domain email.

## Optional: Enforce SSO

> [!IMPORTANT]
>
> If SSO isn't enforced, users can choose to sign in with either their Docker username and password or SSO.

Enforcing SSO requires users to use SSO when signing in to Docker. This centralizes authentication and enforces policies set by the IdP.

1. Sign in to [Docker Home](https://app.docker.com/) and select
your organization. Note that when an organization is part of a company, you must select the company and configure the domain for the organization at the company level.
1. Select **Admin Console**, then **SSO and SCIM**.
1. In the SSO connections table, select the **Action** icon and then **Enable enforcement**. When SSO is enforced, your users are unable to modify their email address and password, convert a user account to an organization, or set up 2FA through Docker Hub. If you want to use 2FA, you must enable 2FA through your IdP.
1. Continue with the on-screen instructions and verify you've completed all tasks.
1. Select **Turn on enforcement** to complete.

Your users must now sign in to Docker with SSO.

> [!NOTE]
>
> When SSO is enforced, [users can't use passwords to access the Docker CLI](/manuals/security/security-announcements.md#deprecation-of-password-logins-on-cli-when-sso-enforced). Users must use a [personal access token](/manuals/enterprise/security/access-tokens.md) (PAT) for authentication to access the Docker CLI.
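For example, a user might authenticate the CLI with a PAT as follows. This is a minimal sketch: `<your-docker-id>` is a placeholder, and `DOCKER_PAT` is an assumed environment variable holding a valid token so it stays out of your shell history.

```bash
# Sign in to the Docker CLI with a personal access token (PAT).
echo "$DOCKER_PAT" | docker login --username <your-docker-id> --password-stdin
```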
## More resources

The following videos demonstrate how to enforce SSO:

- [Video: Enforce SSO with Okta SAML](https://youtu.be/c56YECO4YP4?feature=shared&t=1072)
- [Video: Enforce SSO with Azure AD (OIDC)](https://youtu.be/bGquA8qR9jU?feature=shared&t=1087)

## What's next?

- [Provision users](/manuals/enterprise/security/provisioning/_index.md)
- [Enforce sign-in](../enforce-sign-in/_index.md)
- [Create access tokens](/manuals/enterprise/security/access-tokens.md)
diff --git a/content/manuals/security/for-admins/single-sign-on/images/SSO.png b/content/manuals/enterprise/security/single-sign-on/images/SSO.png
similarity index 100%
rename from content/manuals/security/for-admins/single-sign-on/images/SSO.png
rename to content/manuals/enterprise/security/single-sign-on/images/SSO.png
diff --git a/content/manuals/security/for-admins/single-sign-on/manage.md b/content/manuals/enterprise/security/single-sign-on/manage.md
similarity index 67%
rename from content/manuals/security/for-admins/single-sign-on/manage.md
rename to content/manuals/enterprise/security/single-sign-on/manage.md
index a15d7c24f116..e3cbbbfaf79b 100644
--- a/content/manuals/security/for-admins/single-sign-on/manage.md
+++ b/content/manuals/enterprise/security/single-sign-on/manage.md
@@ -6,6 +6,7 @@ linkTitle: Manage
aliases:
- /admin/company/settings/sso-management/
- /single-sign-on/manage/
+- /security/for-admins/single-sign-on/manage/
---

{{< summary-bar feature_name="SSO" >}}
@@ -16,8 +17,6 @@ aliases:
>
> You must have a [company](/admin/company/) to manage more than one organization.

-{{< include "admin-early-access.md" >}}
-
{{% admin-sso-management-orgs product="admin" %}}

## Manage domains

@@ -25,13 +24,13 @@ aliases:
{{< tabs >}}
{{< tab name="Admin Console" >}}

-{{< include "admin-early-access.md" >}}
-
{{% admin-sso-management product="admin" %}}

{{< /tab >}}
{{< tab name="Docker Hub" >}}

+{{% include "hub-org-management.md" %}}
+
{{% admin-sso-management product="hub" %}}

{{< /tab >}}
@@ -42,13 +41,13 @@ aliases:
{{< tabs >}}
{{< tab name="Admin Console" >}}

-{{< include "admin-early-access.md" >}}
-
{{% admin-sso-management-connections product="admin" %}}

{{< /tab >}}
{{< tab name="Docker Hub" >}}

+{{% include "hub-org-management.md" %}}
+
{{% admin-sso-management-connections product="hub" %}}

{{< /tab >}}
@@ -58,37 +57,39 @@

> [!IMPORTANT]
>
-> SSO has Just-In-Time (JIT) Provisioning enabled by default unless you have [disabled it](/security/for-admins/provisioning/just-in-time/#sso-authentication-with-jit-provisioning-disabled). This means your users are auto-provisioned to your organization.
+> SSO has Just-In-Time (JIT) Provisioning enabled by default unless you have [disabled it](../provisioning/just-in-time/#sso-authentication-with-jit-provisioning-disabled). This means your users are auto-provisioned to your organization.
>
> You can change this on a per-app basis. To prevent auto-provisioning users, you can create a security group in your IdP and configure the SSO app to authenticate and authorize only those users that are in the security group. Follow the instructions provided by your IdP:
>
> - [Okta](https://help.okta.com/en-us/Content/Topics/Security/policies/configure-app-signon-policies.htm)
> - [Entra ID (formerly Azure AD)](https://learn.microsoft.com/en-us/azure/active-directory/develop/howto-restrict-your-app-to-a-set-of-users)
>
-> Alternatively, see the [Provisioning overview](/manuals/security/for-admins/provisioning/_index.md) guide.
+> Alternatively, see the [Provisioning overview](../provisioning/_index.md) guide.
### Add guest users when SSO is enabled

To add a guest that isn't verified through your IdP:

-1. Sign in to the [Admin Console](https://app.docker.com/admin).
-2. Select your organization or company from the **Choose profile** page, then select **Members**.
-3. Select **Invite**.
-4. Follow the on-screen instructions to invite the user.
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+your organization.
+1. Select **Members**.
+1. Select **Invite**.
+1. Follow the on-screen instructions to invite the user.

### Remove users from the SSO company

To remove a user:

-1. Sign in to [Admin Console](https://app.docker.com/admin).
-2. Select your organization or company from the **Choose profile** page, then select **Members**.
-3. Select the action icon next to a user's name, and then select **Remove member**, if you're an organization, or **Remove user**, if you're a company.
-4. Follow the on-screen instructions to remove the user.
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+your organization.
+1. Select **Members**.
+1. Select the action icon next to a user's name, and then select **Remove member**, if you're an organization, or **Remove user**, if you're a company.
+1. Follow the on-screen instructions to remove the user.

## Manage provisioning

-Users are provisioned with Just-in-Time (JIT) provisioning by default. If you enable SCIM, you can disable JIT. For more information, see the [Provisioning overview](/manuals/security/for-admins/provisioning/_index.md) guide.
+Users are provisioned with Just-in-Time (JIT) provisioning by default. If you enable SCIM, you can disable JIT. For more information, see the [Provisioning overview](../provisioning/_index.md) guide.

## What's next?
diff --git a/content/manuals/enterprise/troubleshoot/_index.md b/content/manuals/enterprise/troubleshoot/_index.md
new file mode 100644
index 000000000000..76d4281d6f40
--- /dev/null
+++ b/content/manuals/enterprise/troubleshoot/_index.md
@@ -0,0 +1,9 @@
---
build:
  render: never
title: Troubleshoot
weight: 40
params:
  sidebar:
    group: Enterprise
---
\ No newline at end of file
diff --git a/content/manuals/enterprise/troubleshoot/troubleshoot-provisioning.md b/content/manuals/enterprise/troubleshoot/troubleshoot-provisioning.md
new file mode 100644
index 000000000000..ba720ae8ec1f
--- /dev/null
+++ b/content/manuals/enterprise/troubleshoot/troubleshoot-provisioning.md
@@ -0,0 +1,88 @@
---
description: Learn how to troubleshoot common user provisioning issues.
keywords: scim, jit, provision, troubleshoot, group mapping
title: Troubleshoot provisioning
linkTitle: Troubleshoot provisioning
tags: [Troubleshooting]
toc_max: 2
aliases:
  - /security/troubleshoot/troubleshoot-provisioning/
---

If you experience issues with user roles, attributes, or unexpected account
behavior during user provisioning, this guide provides troubleshooting
recommendations to help you resolve them.

## SCIM attribute values are overwritten or ignored

### Error message

Typically, this scenario does not produce an error message in Docker or your
IdP. This issue usually surfaces as incorrect role or team assignment.

### Possible causes

- JIT provisioning is enabled, and Docker is using values from your IdP's
SSO login flow to provision the user, which overrides
SCIM-provided attributes.
- SCIM was enabled after the user was already provisioned via JIT, so SCIM
updates don't take effect. 
+

### Affected environments

- Docker organizations using SCIM with SSO
- Users provisioned via JIT prior to SCIM setup

### Steps to replicate

1. Enable JIT and SSO for your Docker organization.
1. Sign in to Docker as a user via SSO.
1. Enable SCIM and set role or team attributes for that user.
1. SCIM attempts to update the user's attributes, but the role or team
assignment doesn't reflect the changes.

### Solutions

#### Disable JIT provisioning (recommended)

1. Sign in to [Docker Home](https://app.docker.com/).
1. Select **Admin Console**, then **SSO and SCIM**.
1. Find the relevant SSO connection.
1. Select the **actions menu** and choose **Edit**.
1. Disable **Just-in-Time provisioning**.
1. Save your changes.

With JIT disabled, Docker uses SCIM as the source of truth for user creation
and role assignment.

#### Keep JIT enabled and match attributes

If you prefer to keep JIT enabled:

- Make sure your IdP's SSO attribute mappings match the values being sent
by SCIM.
- Avoid configuring SCIM to override attributes already set via JIT.

This option requires strict coordination between SSO and SCIM attributes
in your IdP configuration.

## SCIM updates don't apply to existing users

### Possible causes

User accounts were originally created manually or via JIT, and SCIM is not
linked to manage them.

### Solution

SCIM only manages users that it provisions. To allow SCIM to manage an
existing user:

1. Remove the user manually from the Docker [Admin Console](https://app.docker.com/admin).
1. Trigger provisioning from your IdP.
1. SCIM re-creates the user with the correct attributes.

> [!WARNING]
>
> Deleting a user removes their resource ownership (for example, repositories).
Transfer ownership before removing the user.
diff --git a/content/manuals/enterprise/troubleshoot/troubleshoot-sso.md b/content/manuals/enterprise/troubleshoot/troubleshoot-sso.md
new file mode 100644
index 000000000000..79a16a026374
--- /dev/null
+++ b/content/manuals/enterprise/troubleshoot/troubleshoot-sso.md
@@ -0,0 +1,234 @@
---
description: Learn how to troubleshoot common SSO issues.
keywords: sso, troubleshoot, single sign-on
title: Troubleshoot single sign-on
linkTitle: Troubleshoot SSO
tags: [Troubleshooting]
toc_max: 2
aliases:
  - /security/for-admins/single-sign-on/troubleshoot/
  - /security/troubleshoot/troubleshoot-sso/
---

While configuring or using single sign-on (SSO), you might encounter issues that
can stem from your identity provider (IdP) or Docker configuration. The
following sections describe some common SSO errors and possible solutions.

## Check for errors

If you experience issues with SSO, check both the Docker Admin Console and your identity provider (IdP) for errors first.

### Check Docker error logs

1. Sign in to [Docker Home](https://app.docker.com/) and select your organization.
1. Select **Admin Console**, then **SSO and SCIM**.
1. In the SSO connections table, select the **Action** menu and then **View error logs**.
1. For more details on specific errors, select **View error details** next to an error message.
1. Note any errors you see on this page for further troubleshooting.

### Check for errors in your IdP

1. Review your IdP's logs or audit trails for any failed authentication or provisioning attempts.
1. Confirm that your IdP's SSO settings match the values provided in Docker.
1. If applicable, confirm that you have configured user provisioning correctly and that it is enabled in your IdP. 
+

1. If applicable, verify that your IdP correctly maps Docker's required user attributes.
1. Try provisioning a test user from your IdP and verify that they appear in Docker.

For further troubleshooting, check your IdP's documentation. You can also contact their support team for guidance on error messages.

## Groups are not formatted correctly

### Error message

When this issue occurs, the following error message is common:
```text
Some of the groups assigned to the user are not formatted as '<organization>:<team>'. Directory groups will be ignored and user will be provisioned into the default organization and team.
```

### Possible causes

- Incorrect group name formatting in your identity provider (IdP): Docker requires groups to follow the format `<organization>:<team>`. If the groups assigned to a user don't follow this format, they're ignored.
- Non-matching groups between IdP and Docker organization: If a group in your IdP doesn't have a corresponding team in Docker, it isn't recognized, and the user is placed in the default organization and team.

### Affected environments

- Docker single sign-on setup using IdPs such as Okta or Azure AD
- Organizations using group-based role assignments in Docker

### Steps to replicate

To replicate this issue:

1. Attempt to sign in to Docker using SSO.
1. The user is assigned groups in the IdP but isn't placed in the expected Docker team.
1. Review Docker logs or IdP logs to find the error message.

### Solutions

Update group names in your IdP:

1. Go to your IdP's group management section.
1. Check the groups assigned to the affected user.
1. Ensure each group follows the required format: `<organization>:<team>`.
1. Update any incorrectly formatted groups to match this pattern.
1. Save changes and retry signing in with SSO.

## User is not assigned to the organization

### Error message

When this issue occurs, the following error message is common:
```text
User '$username' is not assigned to this SSO organization. Contact your administrator. TraceID: XXXXXXXXXXXXX
```

### Possible causes

- User is not assigned to the organization: If Just-in-Time (JIT) provisioning is disabled, the user might not be assigned to your organization.
- User is not invited to the organization: If JIT is disabled and you do not want to enable it, the user must be manually invited.
- SCIM provisioning is misconfigured: If you use SCIM for user provisioning, it might not be correctly syncing users from your IdP.

### Solutions

**Enable JIT provisioning**

JIT is enabled by default when you enable SSO. If you have JIT disabled and need
to re-enable it:

1. Sign in to [Docker Home](https://app.docker.com/) and select your organization.
1. Select **Admin Console**, then **SSO and SCIM**.
1. In the SSO connections table, select the **Action** menu and then **Enable JIT provisioning**.
1. Select **Enable** to confirm.

**Manually invite users**

When JIT is disabled, users are not automatically added to your organization when they authenticate through SSO.
To manually invite users, see [Invite members](/manuals/admin/organization/members.md#invite-members).

**Configure SCIM provisioning**

If you have SCIM enabled, troubleshoot your SCIM connection using the following steps:

1. Sign in to [Docker Home](https://app.docker.com/) and select your organization.
1. Select **Admin Console**, then **SSO and SCIM**.
1. In the SSO connections table, select the **Action** menu and then **View error logs**. 
For more details on specific errors, select **View error details** next to an error message. Note any errors you see on this page.
1. Navigate back to the **SSO and SCIM** page of the Admin Console and verify your SCIM configuration:
   - Ensure that the SCIM Base URL and API Token in your IdP match those provided in the Docker Admin Console.
   - Verify that SCIM is enabled in both Docker and your IdP.
1. Ensure that the attributes being synced from your IdP match Docker's [supported attributes](/manuals/enterprise/security/provisioning/scim.md#supported-attributes) for SCIM.
1. Test user provisioning by trying to provision a test user through your IdP and verifying that they appear in Docker.

## IdP-initiated sign in is not enabled for connection

### Error message

When this issue occurs, the following error message is common:
```text
IdP-Initiated sign in is not enabled for connection '$ssoConnection'.
```

### Possible causes

Docker doesn't support an IdP-initiated SAML flow. This error occurs when a user attempts to authenticate from your IdP, for example by using the Docker SSO app tile on the sign-in page.

### Solutions

**Authenticate from Docker apps**

The user must initiate authentication from a Docker application, such as Docker Hub or Docker Desktop. When the user enters their email address in a Docker app, they're redirected to the configured SSO IdP for their domain.

**Hide the Docker SSO app**

You can hide the Docker SSO app from users in your IdP. This prevents users from attempting to start authentication from the IdP dashboard. You must hide and configure this in your IdP.

## Not enough seats in organization

### Error message

When this issue occurs, the following error message is common:
```text
Not enough seats in organization '$orgName'. Add more seats or contact your administrator.
```

### Possible causes

This error occurs when the organization has no available seats for the user during Just-in-Time (JIT) or SCIM provisioning.

### Solutions

**Add more seats to the organization**

Purchase additional Docker Business subscription seats. For details, see [Manage subscription seats](/manuals/subscription/manage-seats.md).

**Remove users or pending invitations**

Review your organization members and pending invitations. Remove inactive users or pending invitations to free up seats. For more details, see [Manage organization members](/manuals/admin/organization/members.md).

## Domain is not verified for SSO connection

### Error message

When this issue occurs, the following error message is common:
```text
Domain '$emailDomain' is not verified for your SSO connection. Contact your company administrator. TraceID: XXXXXXXXXXXXXX
```

### Possible causes

This error occurs if the IdP authenticated a user through SSO and the User Principal Name (UPN)
returned to Docker doesn't match any of the verified domains associated with the
SSO connection configured in Docker.

### Solutions

**Verify UPN attribute mapping**

Ensure that the IdP SSO connection is returning the correct UPN value in the assertion attributes.

**Add and verify all domains**

Add and verify all domains and subdomains used as UPN by your IdP and associate them with your Docker SSO connection. For details, see [Configure single sign-on](/manuals/enterprise/security/single-sign-on/configure.md).
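For reference, a correctly configured IdP returns an email-formatted Name ID in its SAML assertion, similar to the following sketch. The format URN is the standard SAML `emailAddress` Name ID format, and the address is a placeholder.

```text
<saml:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress">
  jane@example.com
</saml:NameID>
```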
+ +## Unable to find session + +### Error message + +When this issue occurs, the following error message is common: +```text +We couldn't find your session. You may have pressed the back button, refreshed the page, opened too many sign-in dialogs, or there is some issue with cookies. Try signing in again. If the issue persists, contact your administrator. +``` + +### Possible causes + +The following might cause this issue: +- The user pressed the back or refresh button during authentication. +- The authentication flow lost track of the initial request, preventing completion. + +### Solutions + +**Do not disrupt the authentication flow** + +Do not press the back or refresh button during sign-in. + +**Restart authentication** + +Close the browser tab and restart the authentication flow from the Docker application, such as Docker Desktop or Docker Hub. + +## Name ID is not an email address + +### Error message + +When this issue occurs, the following error message is common: +```text +The name ID sent by the identity provider is not an email address. Contact your company administrator. +``` + +### Possible causes + +The following might cause this issue: +- The IdP sends a Name ID (UPN) that does not comply with the email format required by Docker. +- Docker SSO requires the Name ID to be the primary email address of the user. + +### Solutions + +In your IdP, ensure the Name ID attribute format is correct: +1. Verify that the Name ID attribute format in your IdP is set to `EmailAddress`. +2. If it isn't, adjust your IdP settings to return the correct format. \ No newline at end of file diff --git a/content/manuals/extensions/extensions-sdk/_index.md b/content/manuals/extensions/extensions-sdk/_index.md index ec818dd4c8b4..a7ed4c91842d 100644 --- a/content/manuals/extensions/extensions-sdk/_index.md +++ b/content/manuals/extensions/extensions-sdk/_index.md @@ -47,6 +47,6 @@ For further information, see [Architecture](architecture/_index.md). You distribute extensions through Docker Hub. However, you can develop them locally without the need to push the extension to Docker Hub. See [Extensions distribution](extensions/DISTRIBUTION.md) for further details. -{{< include "extensions-form.md" >}} +{{% include "extensions-form.md" %}} {{< grid >}} diff --git a/content/manuals/extensions/extensions-sdk/architecture/_index.md b/content/manuals/extensions/extensions-sdk/architecture/_index.md index 9112351d0600..15e2ff1a02a5 100644 --- a/content/manuals/extensions/extensions-sdk/architecture/_index.md +++ b/content/manuals/extensions/extensions-sdk/architecture/_index.md @@ -64,9 +64,9 @@ Usually, the backend is made of one container that runs within the Docker Deskto a Docker Compose project, creates the container from the `image` option of the `vm` section of the `metadata.json`, and attaches it to the Compose project. See the [ui metadata section](metadata.md#vm-section) for more details. -In some cases, a `compose.yml` file can be used instead of an `image`. This is useful when the backend container +In some cases, a `compose.yaml` file can be used instead of an `image`. This is useful when the backend container needs more specific options, such as mounting volumes or requesting [capabilities](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities) -that can't be expressed just with a Docker image.
The `compose.yaml` file can also be used to add multiple containers needed by the extension, like a database or a message broker. Note that, if the Compose file defines many services, the SDK can only contact the first of them. diff --git a/content/manuals/extensions/extensions-sdk/architecture/metadata.md b/content/manuals/extensions/extensions-sdk/architecture/metadata.md index 5caf7a090a09..650f3e31a579 100644 --- a/content/manuals/extensions/extensions-sdk/architecture/metadata.md +++ b/content/manuals/extensions/extensions-sdk/architecture/metadata.md @@ -50,7 +50,7 @@ Other UI extension points will be available in the future. ### VM section The `vm` section defines a backend service that runs inside the Desktop VM. It must define either an `image` or a -`docker-compose.yaml` file that specifies what service to run in the Desktop VM. +`compose.yaml` file that specifies what service to run in the Desktop VM. ```json "vm": { @@ -66,7 +66,7 @@ When you use `image`, a default compose file is generated for the extension. ```json "vm": { - "composefile": "docker-compose.yaml" + "composefile": "compose.yaml" }, ``` diff --git a/content/manuals/extensions/extensions-sdk/dev/api/overview.md b/content/manuals/extensions/extensions-sdk/dev/api/overview.md index 730a78619296..fe129ddb4ab0 100644 --- a/content/manuals/extensions/extensions-sdk/dev/api/overview.md +++ b/content/manuals/extensions/extensions-sdk/dev/api/overview.md @@ -14,7 +14,7 @@ and communicate with the Docker Desktop dashboard or the underlying system. JavaScript API libraries, with TypeScript support, are available to get all the API definitions into your extension code. -- [@docker/extension-api-client](https://www.npmjs.com/package/@docker/extension-api-client) gives access to the extension API entrypoint `DockerDesktopCLient`. +- [@docker/extension-api-client](https://www.npmjs.com/package/@docker/extension-api-client) gives access to the extension API entrypoint `DockerDesktopClient`. - [@docker/extension-api-client-types](https://www.npmjs.com/package/@docker/extension-api-client-types) can be added as a dev dependency to get type auto-completion in your IDE. ```Typescript diff --git a/content/manuals/extensions/extensions-sdk/extensions/_index.md b/content/manuals/extensions/extensions-sdk/extensions/_index.md index 23f73da04fb3..e69c1319f50a 100644 --- a/content/manuals/extensions/extensions-sdk/extensions/_index.md +++ b/content/manuals/extensions/extensions-sdk/extensions/_index.md @@ -44,4 +44,4 @@ Extensions published in the Marketplace benefit from update notifications to all In addition to providing a description of your extension's features and screenshots, you should also specify additional URLs using [extension labels](labels.md). These direct users to your website for reporting bugs and feedback, and for accessing documentation and support. -{{< include "extensions-form.md" >}} +{{% include "extensions-form.md" %}} diff --git a/content/manuals/extensions/extensions-sdk/process.md b/content/manuals/extensions/extensions-sdk/process.md index e36e899593c5..d48f63698cf9 100644 --- a/content/manuals/extensions/extensions-sdk/process.md +++ b/content/manuals/extensions/extensions-sdk/process.md @@ -42,7 +42,7 @@ Docker Desktop displays published extensions in the Extensions Marketplace. The If you want your extension published in the Marketplace, read the [publish documentation](extensions/publish.md). -{{< include "extensions-form.md" >}} +{{% include "extensions-form.md" %}} ## What’s next?
diff --git a/content/manuals/extensions/private-marketplace.md b/content/manuals/extensions/private-marketplace.md index f75cb862a756..a828d160dd58 100644 --- a/content/manuals/extensions/private-marketplace.md +++ b/content/manuals/extensions/private-marketplace.md @@ -13,7 +13,7 @@ aliases: Learn how to configure and set up a private marketplace with a curated list of extensions for your Docker Desktop users. -Docker Extensions' private marketplace is designed specifically for organizations who don’t give developers root access to their machines. It makes use of [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) so administrators have complete control over the private marketplace. +Docker Extensions' private marketplace is designed specifically for organizations that don’t give developers root access to their machines. It makes use of [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) so administrators have complete control over the private marketplace. ## Prerequisites @@ -79,7 +79,7 @@ Each setting has a `value` that you can set, including a `locked` field that let } ``` -To find out more information about the `admin-settings.json` file, see [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md). +For more information about the `admin-settings.json` file, see [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md). ## Step three: List allowed extensions @@ -192,7 +192,7 @@ These files must be placed on developer's machines. Depending on your operating - Windows: `C:\ProgramData\DockerDesktop` - Linux: `/usr/share/docker-desktop` -Make sure your developers are signed in to Docker Desktop in order for the private marketplace configuration to take effect. As an administrator, you should [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). +Make sure your developers are signed in to Docker Desktop for the private marketplace configuration to take effect. As an administrator, you should [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md). ## Feedback diff --git a/content/manuals/extensions/settings-feedback.md b/content/manuals/extensions/settings-feedback.md index e1f672489abd..3ffd37fcb3e8 100644 --- a/content/manuals/extensions/settings-feedback.md +++ b/content/manuals/extensions/settings-feedback.md @@ -17,7 +17,7 @@ Docker Extensions is switched on by default. To change your settings: 1. Navigate to **Settings**. 2. Select the **Extensions** tab. 3. Next to **Enable Docker Extensions**, select or clear the checkbox to set your desired state. -4. In the bottom-right corner, select **Apply & Restart**. +4. In the bottom-right corner, select **Apply**. > [!NOTE] > @@ -26,7 +26,7 @@ Docker Extensions is switched on by default. To change your settings: > - `~/Library/Group Containers/group.com.docker/settings-store.json` on Mac > - `C:\Users\[USERNAME]\AppData\Roaming\Docker\settings-store.json` on Windows > -> This can also be done with [Hardened Docker Desktop](/manuals/security/for-admins/hardened-desktop/_index.md) +> This can also be done with [Hardened Docker Desktop](/manuals/enterprise/security/hardened-desktop/_index.md) ### Turn on or turn off extensions not available in the Marketplace @@ -35,7 +35,7 @@ You can install extensions through the Marketplace or through the Extensions SDK 1. Navigate to **Settings**. 2.
Select the **Extensions** tab. 3. Next to **Allow only extensions distributed through the Docker Marketplace**, select or clear the checkbox to set your desired state. -4. In the bottom-right corner, select **Apply & Restart**. +4. In the bottom-right corner, select **Apply**. ### See containers created by extensions @@ -45,7 +45,7 @@ update your settings: 1. Navigate to **Settings**. 2. Select the **Extensions** tab. 3. Next to **Show Docker Extensions system containers**, select or clear the checkbox to set your desired state. -4. In the bottom-right corner, select **Apply & Restart**. +4. In the bottom-right corner, select **Apply**. > [!NOTE] > diff --git a/content/manuals/harmonia/_index.md b/content/manuals/harmonia/_index.md deleted file mode 100644 index b3a6b88d5f45..000000000000 --- a/content/manuals/harmonia/_index.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Project Harmonia -description: Learn how you can run your applications in the cloud with Project Harmonia -keywords: run, cloud, docker desktop, resources -sitemap: false -params: - sidebar: - group: Products -aliases: -- /run-cloud/ ---- - -{{% restricted title="Private preview" %}} -Project Harmonia is in Private preview. -{{% /restricted %}} - -Project Harmonia brings the power of the cloud to your local development workflow. You can now run your applications in the cloud whilst continuing to use your existing tools and workflows and without worrying about local resource limitations. Project Harmonia also lets you share previews of your cloud-based applications for real-time feedback. - -## Set up - -To get started with Project Harmonia, you need to: - -- Have a Docker account that's part of a Docker organization -- Email `run.cloud@docker.com` to get help with onboarding - -## Quickstart - -You can use Project Harmonia from the Docker Desktop Dashboard or from the CLI. - -This guide introduces you to essential commands and steps for creating, managing, and sharing a cloud engine. - -### Step one: Create a cloud engine - -{{< tabs group="method" >}} -{{< tab name="Docker Desktop">}} - -1. In the Docker Desktop Dashboard, navigate to the **Project Harmonia** tab. -2. In the top right-hand corner, select **Create Cloud Engine**. -3. Fill out the creation form: - - Enter `cloudengine` as the name - - Choose an organization to associate the cloud engine with - - Select the engine size and architecture - - Note that the **Switch Docker Context to use remote engine** is selected by default. The automatically switches you to your new cloud engine once it has been created. -4. Select **Create**. - -To verify creation, check the context switcher in the top-left corner of the Docker Desktop Dashboard; it should display `cloudengine`. You’re now ready to use it. - -{{< /tab >}} -{{< tab name="CLI">}} - -Run the following command: - -```console -$ docker harmonia engine create cloudengine --type "standard-amd64" --use -``` - -This creates an engine called `cloudengine` and: -- Immediately switches you to the new cloud engine with the `--use` flag. -- Sets the engine size to standard and the engine's CPU architecture to amd64 with the `--type` flag. - -Project Harmonia supports the following values for `--type`: -- `standard-arm64` -- `standard-amd64` (default) -- `large-arm64` -- `large-amd64` -- `aiml-amd64` - -Standard size engines have 2 CPU cores and 4GB RAM, large and AI/ML engines have 4 CPU cores and 8GB RAM. 
- -To verify you're using the newly created cloud engine, run: - -```console -$ docker context inspect -``` - -You should see the following: - -```text -[ - { - "Name": "cloudengine2", -... -``` - -{{< /tab >}} -{{< /tabs >}} - -### Step two: Run and remove containers with the newly created cloud engine - -1. Run an Nginx container in the cloud engine: - ```console - $ docker run -d --name cloudnginx -p 8080:80 nginx - ``` - This maps the container's port `80` to the host's port `8080`. If port `8080` is already in use on your host, you can specify a different port. -2. View the Nginx welcome page. Navigate to [`http://localhost:8080/`](http://localhost:8080/). -3. Verify the running container: - - In the **Containers** tab in the Docker Desktop Dashboard, you should see your Nginx container listed. - - Alternatively, list all running containers in the cloud engine via the terminal: - ```console - $ docker ps - ``` -4. Shut down the container: - ```console - $ docker kill cloudnginx - ``` - -Running a container with a cloud engine is just as straightforward as running it locally. - -### Step three: Create and switch to a new cloud engine - -{{< tabs group="method" >}} -{{< tab name="Docker Desktop">}} - -1. Create a new cloud engine: - - Enter `cloudengine2` as the name - - Choose an organization to associate the cloud engine with - - Select the **Standard** engine size with the **AMD-64** architecture - In the **Project Harmonia** view you should now see both `cloudengine` and `cloudengine2`. -2. Switch between engines, also known as your Docker contexts. Use the context switcher in the top-left corner of the Docker Desktop Dashboard to toggle between your cloud engines or switch from your local engine (`desktop-linux`) to a cloud engine. - -{{< /tab >}} -{{< tab name="CLI">}} - -1. Create a new cloud engine. Run: - ```console - $ docker harmonia engine create cloudengine2 - ``` - Docker automatically switches you to your new cloud engine. -2. Switch between engines, also known as your Docker contexts. Either switch to your first cloud engine: - ```console - $ docker context use cloudengine - ```  - Or switch back to your local engine: - ```console - $ docker context use desktop-linux - ``` - -{{< /tab >}} -{{< /tabs >}} - -### Step four: Use a file sync for your cloud engine - -Project Harmonia takes advantage of [Synchronized file shares](/manuals/desktop/features/synchronized-file-sharing.md) to enable local-to-remote file shares and port mappings. - -{{< tabs group="method" >}} -{{< tab name="Docker Desktop">}} - -1. Clone the [Awesome Compose](https://github.com/docker/awesome-compose) repository. -2. In the Docker Desktop Dashboard, navigate to the **Project Harmonia** view. -3. For the `cloudengine` cloud engine, select the **Actions** menu and then **Manage file syncs**. -4. Select **Create file sync**. -5. Navigate to the `awesome-compose/react-express-mysql` folder and select **Open**. -6. In your terminal, navigate to the `awesome-compose/react-express-mysql` directory. -7. Run the project in the cloud engine with: - ```console - $ docker compose up -d - ``` -8. Test the application by visiting [`http://localhost:3000`](http://localhost:3000/). - You should see the home page. The code for this page is located in `react-express-mysql/frontend/src/App.js`. -9. In an IDE or text editor, open the `App.js` file, change some text, and save. Watch as the code reloads live in your browser. - -{{< /tab >}} -{{< tab name="CLI">}} - -1. 
Clone the [Awesome Compose](https://github.com/docker/awesome-compose) repository. -2. In your terminal, change into the `awesome-compose/react-express-mysql` directory. -3. Create a file sync for `cloudengine`: - ```console - $ docker harmonia file-sync create --engine cloudengine $PWD -4. Run the project in the cloud engine with: - ```console - $ docker compose up -d - ``` -5. Test the application by visiting [`http://localhost:3000`](http://localhost:3000/). - You should see the home page. The code for this page is located in `react-express-mysql/frontend/src/App.js`. -6. In an IDE or text editor, open the `App.js` file, change some text, and save. Watch as the code reloads live in your browser. - -{{< /tab >}} -{{< /tabs >}} - -### Step five: Share a container port - -{{< tabs group="method" >}} -{{< tab name="Docker Desktop">}} - -1. Make sure your Docker context is set to `cloudengine`. -2. In the Docker Desktop Dashboard, navigate to the **Containers** view. -3. If necessary, expand the application listing to show all of its containers. -4. Select the **lock** icon in the **Ports** column of your running container next to `3000:3000`. - This creates a publicly accessible URL that you can share with teammates. -5. Select the **copy** icon, to copy this URL. - -To view all shared ports for your Docker context, select the **Shared ports** icon in the bottom-right corner of the Docker Desktop Dashboard. - -{{< /tab >}} -{{< tab name="CLI">}} - -To share a container port, make sure your Docker context is set to `cloudengine` and then run: -``` console -$ docker harmonia engine share create cloudengine 3000 -``` -This returns a publicly accessible URL for your React app hosted on port `3000`, that you can share with teammates. - -To see a list of all your shared ports, run: - -```console -$ docker harmonia engine share list -``` - -{{< /tab >}} -{{< /tabs >}} - -### Step six: Clean up - -{{< tabs group="method" >}} -{{< tab name="Docker Desktop">}} - -To stop the running project: - -```console -$ docker compose down -``` - -To remove a file sync session: -1. Navigate to your cloud engine in the **Project Harmonia** view. -2. Select the **Actions** menu and then **Manage file syncs**. -3. Select the **drop-down** icon on the file sync. -4. Select **Delete**. - -To remove a cloud engine, navigate to the **Project Harmonia** view and then select the **delete** icon. - -{{< /tab >}} -{{< tab name="CLI">}} - -To stop the running project: - -```console -$ docker compose down -``` - -To remove the file sync session, run: - -```console -$ docker harmonia file-sync delete --engine cloudengine $PWD -``` - -To remove a cloud engine, run: - -```console -$ docker harmonia engine delete -``` - -{{< /tab >}} -{{< /tabs >}} - -## Troubleshoot - -Run `docker harmonia doctor` to print helpful troubleshooting information. - -## Known issues - -- KinD does not run on Project Harmonia due to some hard-coded assumptions to ensure it's running in a privileged container. K3d is a good alternative. -- Containers cannot access host through DNS `host.docker.internal`. -- File binds (non-directory binds) are currently static, meaning changes will not be reflected until the container is restarted. This also affects Compose configs and secrets directives. -- Bind _mounts_, such as `-v /localpath:/incontainer` in the `docker run` command, require creating a file-sync. 
-- Creating a [synchronized file share](/manuals/desktop/features/synchronized-file-sharing.md) for a directory with a large amount of may take extra time to sync and become ready for use in a container. -- Bind _volumes_, such as those created with `docker volume create --driver local --opt type=none --opt o=bind --opt device=/some/host/path myvolname` or via the compose equivalent, are not supported. -- Port-forwarding for UDP is not supported. -- Docker Compose projects relying on `watch` in `sync` mode are not working with the `tar` synchronizer. Configure it to use `docker cp` instead, disable tar sync by setting `COMPOSE_EXPERIMENTAL_WATCH_TAR=0` in your environment. -- Some Docker Engine features that let you access the underlying host, such as `--pid=host`, `--network=host`, and `--ipc=host`, are currently disabled. diff --git a/content/manuals/offload/_index.md b/content/manuals/offload/_index.md new file mode 100644 index 000000000000..cea387365598 --- /dev/null +++ b/content/manuals/offload/_index.md @@ -0,0 +1,64 @@ +--- +title: Docker Offload +weight: 15 +description: Find documentation on Docker Offload to help you build and run your container images faster, both locally and in CI +keywords: build, cloud, cloud build, remote builder +params: + sidebar: + group: Products + badge: + color: blue + text: Beta + +grid: + +- title: Quickstart + description: Get up and running with Docker Offload in just a few steps. + icon: rocket_launch + link: /offload/quickstart/ + +- title: About + description: Learn about Docker Offload and how it works. + icon: info + link: /offload/about/ + +- title: Configure + description: Set up and customize your cloud build environments. + icon: tune + link: /offload/configuration/ + +- title: Usage + description: Learn about Docker Offload usage and how to monitor your cloud resources. + icon: monitor_heart + link: /offload/usage/ + +- title: Optimize + description: Improve performance, caching, and cost efficiency in Docker Offload. + icon: speed + link: /offload/optimize/ + +- title: Troubleshoot + description: Learn how to troubleshoot issues with Docker Offload. + icon: bug_report + link: /offload/troubleshoot/ + +- title: Feedback + description: Provide feedback on Docker Offload. + icon: feedback + link: /offload/feedback/ + +aliases: +- /harmonia/ +--- + +{{< summary-bar feature_name="Docker Offload" >}} + +Docker Offload is a fully managed service that lets you offload building and +running containers to the cloud using the Docker tools you already know. It +provides cloud infrastructure for fast, consistent builds and compute-heavy +workloads like running LLMs or machine learning pipelines. + +In the following topics, learn about Docker Offload, how to set it up, use it +for your workflows, and troubleshoot common issues. + +{{< grid >}} \ No newline at end of file diff --git a/content/manuals/offload/about.md b/content/manuals/offload/about.md new file mode 100644 index 000000000000..8f6c9a804a14 --- /dev/null +++ b/content/manuals/offload/about.md @@ -0,0 +1,126 @@ +--- +title: About Docker Offload +linktitle: About +weight: 15 +description: Learn about Docker Offload, its features, and how it works. +keywords: cloud, build, remote builder +--- + +Docker Offload is a fully managed service for building and running containers in +the cloud using the Docker tools you already know, including Docker Desktop, the +Docker CLI, and Docker Compose. 
It extends your local development workflow into a +scalable, cloud-powered environment, so you can offload compute-heavy tasks, +accelerate builds, and securely manage container workloads across the software +lifecycle. + +Docker Offload also supports GPU-accelerated instances, allowing you to +containerize and run compute-intensive workloads such as Docker Model Runner and +other machine learning or data processing tasks that benefit from GPU acceleration. + +## Key features + +Docker Offload includes the following capabilities to support modern container +workflows: + +- Cloud-based builds: Execute builds on remote, fully managed BuildKit instances. +- GPU acceleration: Use NVIDIA L4 GPU-backed environments for machine learning, + media processing, and other compute-intensive workloads. +- Ephemeral cloud runners: Automatically provision and tear down cloud + environments for each container session. +- Shared build cache: Speed up build times across machines and teammates with a + smart, shared cache layer. +- Hybrid workflows: Seamlessly transition between local and remote execution + using Docker Desktop or the CLI. +- Secure communication: Use encrypted tunnels between Docker Desktop and cloud + environments with support for secure secrets and image pulling. +- Port forwarding and bind mounts: Retain a local development experience even + when running containers in the cloud. +- VDI-friendly: Use Docker Offload in virtual desktop environments or systems that + don't support nested virtualization. + +## Why use Docker Offload? + +Docker Offload is designed to support modern development teams working across +local and cloud environments. It helps you: + +- Offload heavy builds and runs to fast, scalable infrastructure +- Accelerate feedback loops in development and testing +- Run containers that require more resources than your local setup can provide +- Build and run AI apps with instant access to GPU-powered environments +- Use Docker Compose to manage complex, multi-service apps that need cloud + resources +- Maintain consistent environments without managing custom infrastructure +- Develop efficiently in restricted or low-powered environments like VDIs + +Docker Offload is ideal for high-velocity development workflows +that need the flexibility of the cloud without sacrificing the simplicity of +local tools. + +## How Docker Offload works + +Docker Offload replaces the need to build or run containers locally by connecting +Docker Desktop to secure, dedicated cloud resources. + +### Building with Docker Offload + +When you use Docker Offload for builds, the `docker buildx build` command sends +the build request to a remote BuildKit instance in the cloud, instead of +executing it locally. Your workflow stays the same. Only the execution +environment changes. + +The build runs on infrastructure provisioned and managed by Docker: + +- Each cloud builder is an isolated Amazon EC2 instance with its own EBS volume. +- Remote builders use a shared cache to speed up builds across machines and + teammates. +- Build results are encrypted in transit and sent to your specified destination + (such as a registry or local image store). + +Docker Offload manages the lifecycle of builders automatically. There's no need to +provision or maintain infrastructure. + +> [!NOTE] +> +> Docker Offload builders are currently hosted in the United States East region. Users in +> other regions might experience increased latency.
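+As a minimal sketch of the build workflow, the following commands start Offload and then run an ordinary build that executes on a cloud builder. The image name `example/app` is an illustrative placeholder; the build command itself is unchanged from a local workflow: + +```console +$ docker offload start +$ docker buildx build --tag example/app:latest . +``` + +Because the build cache is shared, a teammate who runs the same build later can reuse the layers this build produced.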
+ +### Running containers with Docker Offload + +When you use Docker Offload to run containers, Docker Desktop creates a secure +SSH tunnel to a Docker daemon running in the cloud. Your containers are started +and managed entirely in that remote environment. + +Here's what happens: + +1. Docker Desktop connects to the cloud and triggers container creation. +2. Docker Offload pulls the required images and starts containers in the cloud. +3. The connection stays open while the containers run. +4. When the containers stop running, the environment shuts down and is cleaned + up automatically. + +This setup avoids the overhead of running containers locally and enables fast, +reliable containers even on low-powered machines, including machines that do not +support nested virtualization. This makes Docker Offload ideal for developers +using environments such as virtual desktops, cloud-hosted development machines, +or older hardware. + +Docker Offload also supports GPU-accelerated workloads. Containers that require +GPU access can run on cloud instances provisioned with NVIDIA L4 GPUs for +efficient AI inferencing, media processing, and general-purpose GPU +acceleration. This enables compute-heavy workflows such as model evaluation, +image processing, and hardware-accelerated CI tests to run seamlessly in the +cloud. + +Despite running remotely, features like bind mounts and port forwarding continue +to work as expected, providing a local-like experience from within Docker Desktop +and the CLI. + +Docker Offload provisions an ephemeral cloud environment for each session. The +environment remains active while you are interacting with Docker Desktop or +actively using containers. If no activity is detected for about five minutes, the +session shuts down automatically. This includes any containers, images, or +volumes in that environment, which are deleted when the session ends. + +## What's next + +Get hands-on with Docker Offload by following the [Docker Offload quickstart](/offload/quickstart/). \ No newline at end of file diff --git a/content/manuals/offload/configuration.md b/content/manuals/offload/configuration.md new file mode 100644 index 000000000000..0af088a11bfd --- /dev/null +++ b/content/manuals/offload/configuration.md @@ -0,0 +1,104 @@ +--- +title: Configure Docker Offload +linktitle: Configure +weight: 20 +description: Learn how to configure build settings for Docker Offload. +keywords: cloud, configuration, settings, cloud builder, GPU, disk allocation, private resources, firewall +--- + +To use Docker Offload, you must start it in Docker Desktop. For more details, +see the [Docker Offload quickstart](/offload/quickstart/). + +You can configure settings for the cloud builders in Docker Offload, as well as +settings for your entire organization, through **Offload settings** in +the Docker Offload dashboard. + +> [!NOTE] +> +> To view usage and configure billing for Docker Offload, see [Docker Offload +> usage and billing](/offload/usage/). + +## Offload settings + +The **Offload settings** page in Docker Home lets you configure disk +allocation, private resource access, and firewall settings for your cloud +builders in your organization. + +To view the **Offload settings** page: + +1. Go to [Docker Home](https://app.docker.com/). +2. Select the account for which you want to manage Docker Offload. +3. Select **Offload** > **Offload settings**. + +The following sections describe the available settings.
+ +### Disk allocation + +The **Disk allocation** setting lets you control how much of the available +storage is dedicated to the build cache. A lower allocation increases storage +available for active builds. + +Adjust the **Disk allocation** slider to specify the percentage of storage used +for build caching. + +Any changes take effect immediately. + +> [!TIP] +> +> If you build very large images, consider allocating less storage for caching. + +### Build cache space + +Your subscription includes the following build cache space: + +| Subscription | Build cache space | +|--------------|-------------------| +| Personal | N/A | +| Pro | 50GB | +| Team | 100GB | +| Business | 200GB | + +To get more build cache space, [upgrade your subscription](/manuals/subscription/change.md). + +### Private resource access + +Private resource access lets cloud builders pull images and packages from +private resources. This feature is useful when builds rely on self-hosted +artifact repositories or private OCI registries. + +For example, if your organization hosts a private [PyPI](https://pypi.org/) +repository on a private network, Docker Offload cannot access +it by default, because the cloud builder is not connected to your private network. + +To enable your cloud builders to access your private resources, enter the host +name and port of your private resource and then select **Add**. + +#### Authentication + +If your internal artifacts require authentication, make sure that you +authenticate with the repository either before or during the build. For internal +package repositories for npm or PyPI, use [build +secrets](/manuals/build/building/secrets.md) to authenticate during the build, as shown in the sketch at the end of this page. +For internal OCI registries, use `docker login` to authenticate before building. + +If you use a private registry that requires authentication, you need to run +`docker login` twice before building: once to authenticate with Docker and use +the cloud builder, and again to authenticate with the private registry. + +```console +$ echo $DOCKER_PAT | docker login docker.io -u <your-docker-username> --password-stdin +$ echo $REGISTRY_PASSWORD | docker login registry.example.com -u <registry-username> --password-stdin +$ docker build --builder <cloud-builder-name> --tag registry.example.com/<image> --push . +``` + +### Firewall + +Firewall settings let you restrict cloud builder egress traffic to specific IP +addresses. This helps enhance security by limiting external network egress from +the builder. + +1. Select **Enable firewall: Restrict cloud builder egress to specific public IP address**. +2. Enter the IP address you want to allow. +3. Select **Add** to apply the restriction.
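+As a follow-up to the authentication guidance above, the following sketch shows one way to pass a token from your local environment into a cloud build with a build secret. The secret ID `pypi_token`, the `PYPI_TOKEN` environment variable, and the image name are illustrative placeholders rather than fixed values: + +```console +$ docker buildx build --secret id=pypi_token,env=PYPI_TOKEN --tag registry.example.com/<image> --push . +``` + +A step in the Dockerfile can then consume the secret with `RUN --mount=type=secret,id=pypi_token`, which makes the token available during that step without writing it into the image layers.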
+ +You can use the form for any type of feedback, including: + +- Reporting unexpected behavior +- Suggesting improvements to Offload +- Sharing what's working well for you +- Describing how Offload integrates into your workflow and supports your development process + +### Report bugs or problems on GitHub + +To report bugs or problems, visit: +- [Docker Desktop for Mac issues on +GitHub](https://github.com/docker/for-mac/issues) +- [Docker Desktop for Windows issues on GitHub](https://github.com/docker/for-win/issues) +- [Docker Desktop for Linux issues on +GitHub](https://github.com/docker/desktop-linux/issues) + +### Suggest features or ideas + +To suggest new features or improvements, visit the [Docker Public +Roadmap](https://github.com/docker/roadmap/discussions). You can browse existing +ideas, vote on what matters to you, or open a new discussion. diff --git a/content/manuals/offload/images/cloud-mode.png b/content/manuals/offload/images/cloud-mode.png new file mode 100644 index 000000000000..f8b1ceb60d25 Binary files /dev/null and b/content/manuals/offload/images/cloud-mode.png differ diff --git a/content/manuals/offload/optimize.md b/content/manuals/offload/optimize.md new file mode 100644 index 000000000000..99e4aa8f6dc0 --- /dev/null +++ b/content/manuals/offload/optimize.md @@ -0,0 +1,83 @@ +--- +title: Optimize Docker Offload usage +linktitle: Optimize usage +weight: 40 +description: Learn how to optimize your Docker Offload usage. +keywords: cloud, optimize, performance, caching, cost efficiency +--- + +Docker Offload runs your builds remotely, not on the machine where you invoke the +build. This means that files must be transferred from your local system to the +cloud over the network. + +Transferring files over the network introduces higher latency and lower +bandwidth compared to local transfers. To reduce these effects, Docker Offload +includes several performance optimizations: + +- It uses attached storage volumes for build cache, which makes reading and writing cache fast. +- When pulling build results back to your local machine, it only transfers layers that changed since the previous build. + +Even with these optimizations, large projects or slower network connections can +lead to longer transfer times. Here are several ways to optimize your build +setup for Docker Offload: + +- [Use `.dockerignore` files](#dockerignore-files) +- [Choose slim base images](#slim-base-images) +- [Use multi-stage builds](#multi-stage-builds) +- [Fetch remote files during the build](#fetch-remote-files-in-build) +- [Leverage multi-threaded tools](#multi-threaded-tools) + +For general Dockerfile tips, see [Building best practices](/manuals/build/building/best-practices.md). + +### dockerignore files + +A [`.dockerignore` file](/manuals/build/concepts/context.md#dockerignore-files) +lets you specify which local files should *not* be included in the build +context. Files excluded by these patterns won’t be uploaded to Docker Offload +during a build. + +Typical items to ignore: + +- `.git` – avoids transferring your version history. (Note: you won’t be able to run `git` commands in the build.) +- Build artifacts or locally generated binaries. +- Dependency folders such as `node_modules`, if those are restored in the build + process. + +As a rule of thumb, your `.dockerignore` should be similar to your `.gitignore`. + +### Slim base images + +Smaller base images in your `FROM` instructions can reduce final image size and +improve build performance.
The [`alpine`](https://hub.docker.com/_/alpine) image +is a good example of a minimal base. + +For fully static binaries, you can use [`scratch`](https://hub.docker.com/_/scratch), which is an empty base image. + +### Multi-stage builds + +[Multi-stage builds](/build/building/multi-stage/) let you separate build-time +and runtime environments in your Dockerfile. This not only reduces the size of +the final image but also allows for parallel stage execution during the build. + +Use `COPY --from` to copy files from earlier stages or external images. This +approach helps minimize unnecessary layers and reduce final image size. + +### Fetch remote files in build + +When possible, download large files from the internet during the build itself +instead of bundling them in your local context. This avoids network transfer +from your client to Docker Offload. + +You can do this using: + +- The Dockerfile [`ADD` instruction](/reference/dockerfile/#add) +- `RUN` commands like `wget`, `curl`, or `rsync` + +### Multi-threaded tools + +Some build tools, such as `make`, run jobs serially by default. If the tool +supports it, configure it to run in parallel. For example, use `make --jobs=4` +to run four jobs simultaneously. + +Taking advantage of available CPU resources in the cloud can significantly +improve build time. \ No newline at end of file diff --git a/content/manuals/offload/quickstart.md b/content/manuals/offload/quickstart.md new file mode 100644 index 000000000000..9b07b4c2d7c6 --- /dev/null +++ b/content/manuals/offload/quickstart.md @@ -0,0 +1,91 @@ +--- +title: Docker Offload quickstart +linktitle: Quickstart +weight: 10 +description: Learn how to use Docker Offload to build and run your container images faster, both locally and in CI. +keywords: cloud, quickstart, cloud mode, Docker Desktop, GPU support, cloud builder, usage +--- + +{{< summary-bar feature_name="Docker Offload" >}} + +This quickstart helps you get started with Docker Offload. Docker Offload lets +you build and run container images faster by offloading resource-intensive tasks +to the cloud. It provides a cloud-based environment that mirrors your local +Docker Desktop experience. + +## Step 1: Sign up and subscribe to Docker Offload for access + +To access Docker Offload, you must [sign +up](https://www.docker.com/products/docker-offload/) and subscribe. + +## Step 2: Start Docker Offload + +> [!NOTE] +> +> After subscribing to Docker Offload, the first time you start Docker Desktop +> and sign in, you might be prompted to start Docker Offload. If you start Docker +> Offload from this prompt, you can skip the following steps. Note that you can +> use the following steps to start Docker Offload at any time. + + +1. Start Docker Desktop and sign in. +2. Open a terminal and run the following command to start Docker Offload: + + ```console + $ docker offload start + ``` + +3. When prompted, select your account to use for Docker Offload. This account + consumes credits for your Docker Offload usage. + +4. When prompted, select whether to enable GPU support. If you choose to enable + GPU support, Docker Offload runs in an instance with an NVIDIA L4 GPU, + which is useful for machine learning or compute-intensive workloads. + + > [!NOTE] + > + > Enabling GPU support consumes more budget. For more details, see [Docker + > Offload usage](/offload/usage/).
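+To confirm that the session is active before you continue, you can check its status from the same terminal: + +```console +$ docker offload status +```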
+ +When Docker Offload is started, you'll see a cloud icon ({{< inline-image +src="./images/cloud-mode.png" alt="Offload mode icon" >}}) +in the Docker Desktop Dashboard header, and the Dashboard appears purple. +You can run `docker offload status` in a terminal to check the status of +Docker Offload. + +## Step 3: Run a container with Docker Offload + +After starting Docker Offload, Docker Desktop connects to a secure cloud environment +that mirrors your local experience. When you run builds or containers, they +execute remotely, but behave just like local ones. + +To verify that Docker Offload is working, run a container: + +```console +$ docker run --rm hello-world +``` + +If you enabled GPU support, you can also run a GPU-enabled container: + +```console +$ docker run --rm --gpus all hello-world +``` + +If Docker Offload is working, you'll see `Hello from Docker!` in the terminal output. + +## Step 4: Stop Docker Offload + +When you're done using Docker Offload, you can stop it. When stopped, you build +images and run containers locally. + +```console +$ docker offload stop +``` + +To start Docker Offload again, run the `docker offload start` command. + +## What's next + +- [Configure Docker Offload](configuration.md). +- Try [Docker Model Runner](../ai/model-runner/_index.md) or + [Compose](../ai/compose/models-and-compose.md) to run AI models using Docker Offload. \ No newline at end of file diff --git a/content/manuals/offload/troubleshoot.md b/content/manuals/offload/troubleshoot.md new file mode 100644 index 000000000000..802063cb0385 --- /dev/null +++ b/content/manuals/offload/troubleshoot.md @@ -0,0 +1,55 @@ +--- +title: Troubleshoot Docker Offload +linktitle: Troubleshoot +weight: 800 +description: Learn how to troubleshoot issues with Docker Offload. +keywords: cloud, troubleshooting, cloud mode, Docker Desktop, cloud builder, usage +tags: [Troubleshooting] +--- + +Docker Offload requires: + +- Authentication +- An active internet connection +- No restrictive proxy or firewall blocking traffic to Docker Cloud +- Beta access to Docker Offload +- Docker Desktop 4.43 or later + +Docker Desktop uses Offload to run both builds and containers in the cloud. +If builds or containers fail to run, fall back to local execution, or report +session errors, use the following steps to resolve the issue. + +1. Ensure Docker Offload is enabled in Docker Desktop: + + 1. Open Docker Desktop and sign in. + 2. Go to **Settings** > **Beta features**. + 3. Ensure that **Docker Offload** is checked. + +2. Use the following command to check whether the connection is active: + + ```console + $ docker offload status + ``` + +3. To get more information, run the following command: + + ```console + $ docker offload diagnose + ``` + +4. If you're not connected, start a new session: + + ```console + $ docker offload start + ``` + +5. Verify authentication with `docker login`. + +6. If needed, you can sign out and then sign in again: + + ```console + $ docker logout + $ docker login + ``` + +7. Verify your usage and billing. For more information, see [Docker Offload usage](/offload/usage/). \ No newline at end of file diff --git a/content/manuals/offload/usage.md b/content/manuals/offload/usage.md new file mode 100644 index 000000000000..d31c06295bc0 --- /dev/null +++ b/content/manuals/offload/usage.md @@ -0,0 +1,80 @@ +--- +title: Docker Offload usage and billing +linktitle: Usage & billing +weight: 30 +description: Learn about Docker Offload usage and how to monitor your cloud resources.
+keywords: cloud, usage, cloud minutes, shared cache, top repositories, cloud builder, Docker Offload +--- + +{{< summary-bar feature_name="Docker Offload" >}} + +## Docker Offload billing + +For Docker Offload, you can view and configure billing on the **Docker Offload** +page in [Docker Home Billing](https://app.docker.com/billing). On this page, you +can: + +- View your included budget +- View rates for cloud resources +- Enable or disable on-demand usage +- Add or change payment methods + +For more general information about billing, see [Billing](../billing/_index.md). + +## Docker Offload overview + +The Docker Offload overview page in Docker Home provides visibility into +how you or your team is using cloud resources to build and run containers. + +To view the **Overview** page: + +1. Sign in to [Docker Home](https://app.docker.com/). +2. Select the account for which you want to manage Docker Offload. +3. Select **Offload** > **Overview**. + +The following sections describe the available widgets on **Overview**. + +### Offload minutes + +This widget shows the total number of offload minutes used over time. Offload +minutes represent the time spent running builds and containers in the Offload +environment. You can use this chart to: + +- Track your Offload usage trends over time. +- Spot spikes in usage, which might indicate CI changes or build issues. +- Estimate usage against your subscription limits. + +### Build cache usage + +This widget displays data about cache reuse across all builds, helping you +understand how effectively Docker Offload is using the build cache. It +provides insight into: + +- The percentage of cache hits vs. misses. +- How much estimated build time is saved by reusing cache layers. +- Opportunities to improve cache efficiency by tuning your Dockerfiles or build + strategy. + +### Top repositories built + +This widget highlights the repositories with the highest build activity for +Docker Offload. It helps you understand which projects consume the most +cloud resources and how efficiently they're being built. + +It includes both aggregated metrics and per-repository details to give you a +comprehensive view. + +Use this widget to: + +- Identify build hotspots: See which repositories are consuming the most build + time and resources. +- Spot trends: Monitor how build activity evolves across your projects. +- Evaluate efficiency: Check which repositories benefit most from cache reuse. +- Target improvements: Flag repositories with low cache hits or high failure + rates for optimization. + +### Top 10 images + +This widget shows the top 10 images used in Docker Offload run sessions. It +provides insight into which images are most frequently used, helping you +understand your team's container usage patterns. diff --git a/content/manuals/platform-release-notes.md b/content/manuals/platform-release-notes.md index e7ceb4457104..41029807be83 100644 --- a/content/manuals/platform-release-notes.md +++ b/content/manuals/platform-release-notes.md @@ -12,7 +12,12 @@ tags: [Release notes, admin] This page provides details on new features, enhancements, known issues, and bug fixes across Docker Home, the Admin Console, billing, security, and subscription functionalities. -Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projects/51/views/1?filterQuery=) to see what's coming next. +## 2025-01-30 + +### New + +- Installing Docker Desktop via the PKG installer is now generally available.
+- Enforcing sign-in via configuration profiles is now generally available. ## 2024-12-10 @@ -28,22 +33,22 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec ### New - Administrators can now: - - Enforce sign-in with macOS [configuration profiles](/manuals/security/for-admins/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access). + - Enforce sign-in with [configuration profiles](/manuals/enterprise/security/enforce-sign-in/methods.md#configuration-profiles-method-mac-only) (Early Access). - Enforce sign-in for more than one organization at a time (Early Access). - - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/desktop/setup/install/enterprise-deployment/pkg-install-and-configure.md) (Early Access). - - [Use Desktop Settings Management via the Docker Admin Console](/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md) (Early Access). + - Deploy Docker Desktop for Mac in bulk with the [PKG installer](/manuals/enterprise/enterprise-deployment/pkg-install-and-configure.md) (Early Access). + - [Use Desktop Settings Management via the Docker Admin Console](/manuals/enterprise/security/hardened-desktop/settings-management/configure-admin-console.md) (Early Access). ### Bug fixes and enhancements - Enhance Container Isolation (ECI) has been improved to: - - Permit admins to [turn off Docker socket mount restrictions](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket). - - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images). + - Permit admins to [turn off Docker socket mount restrictions](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#allowing-all-containers-to-mount-the-docker-socket). + - Support wildcard tags when using the [`allowedDerivedImages` setting](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/config.md#docker-socket-mount-permissions-for-derived-images). ## 2024-11-11 ### New -- [Personal access tokens](/security/for-developers/access-tokens/) (PATs) now support expiration dates. +- [Personal access tokens](/security/access-tokens/) (PATs) now support expiration dates. ## 2024-10-15 @@ -55,17 +60,17 @@ Take a look at the [Docker Public Roadmap](https://github.com/orgs/docker/projec ### New -- Deploying Docker Desktop via the [MSI installer](/manuals/desktop/setup/install/enterprise-deployment/msi-install-and-configure.md) is now generally available. -- Two new methods to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) (Windows registry key and `.plist` file) are now generally available. +- Deploying Docker Desktop via the [MSI installer](/manuals/enterprise/enterprise-deployment/msi-install-and-configure.md) is now generally available. +- Two new methods to [enforce sign-in](/manuals/enterprise/security/enforce-sign-in/_index.md) (Windows registry key and `.plist` file) are now generally available. ## 2024-08-24 ### New -- Administrators can now view [organization insights](/manuals/admin/organization/insights.md) (Early Access). +- Administrators can now view [organization Insights](/manuals/admin/organization/insights.md). 
## 2024-07-17 ### New -- You can now centrally access and manage Docker products in [Docker Home](https://app.docker.com) (Early Access). \ No newline at end of file +- You can now centrally access and manage Docker products in [Docker Home](https://app.docker.com). \ No newline at end of file diff --git a/content/manuals/registry.md b/content/manuals/registry.md deleted file mode 100644 index ac668945ee49..000000000000 --- a/content/manuals/registry.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Registry -description: The Docker Hub registry implementation -keywords: registry, distribution, docker hub, spec, schema, api, manifest, auth -params: - sidebar: - group: Open source -aliases: - - /registry/compatibility/ - - /registry/configuration/ - - /registry/deploying/ - - /registry/deprecated/ - - /registry/garbage-collection/ - - /registry/help/ - - /registry/insecure/ - - /registry/introduction/ - - /registry/notifications/ - - /registry/recipes/ - - /registry/recipes/apache/ - - /registry/recipes/nginx/ - - /registry/recipes/osx-setup-guide/ - - /registry/spec/ - - /registry/spec/api/ - - /registry/spec/auth/ - - /registry/spec/auth/jwt/ - - /registry/spec/auth/oauth/ - - /registry/spec/auth/scope/ - - /registry/spec/auth/token/ - - /registry/spec/deprecated-schema-v1/ - - /registry/spec/implementations/ - - /registry/spec/json/ - - /registry/spec/manifest-v2-1/ - - /registry/spec/manifest-v2-2/ - - /registry/spec/menu/ - - /registry/storage-drivers/ - - /registry/storage-drivers/azure/ - - /registry/storage-drivers/filesystem/ - - /registry/storage-drivers/gcs/ - - /registry/storage-drivers/inmemory/ - - /registry/storage-drivers/oss/ - - /registry/storage-drivers/s3/ - - /registry/storage-drivers/swift/ ---- - -> [!IMPORTANT] -> -> The ability to push [deprecated Docker image manifest version 2, schema 1](https://distribution.github.io/distribution/spec/deprecated-schema-v1/) images to Docker Hub is deprecated as of November 4th, 2024. - -Registry, the open source implementation for storing and distributing container -images and other content, has been donated to the CNCF. Registry now goes under -the name of Distribution, and the documentation has moved to -[distribution/distribution]. - -The Docker Hub registry implementation is based on Distribution. Docker Hub -implements version 1.0.1 OCI distribution [specification]. For reference -documentation on the API protocol that Docker Hub implements, refer to the OCI -distribution specification. - -## Supported media types - -Docker Hub supports the following image manifest formats for pulling images: - -- [OCI image manifest] -- [Docker image manifest version 2, schema 2] -- Docker image manifest version 2, schema 1 -- Docker image manifest version 1 - -You can push images with the following formats: - -- [OCI image manifest] -- [Docker image manifest version 2, schema 2] - -Docker Hub also supports OCI artifacts. See [OCI artifacts]. 
- -## Authentication - -For documentation related to authentication to the Docker Hub registry, see: - -- [Token authentication specification][token] -- [OAuth 2.0 token authentication][oauth2] -- [JWT authentication][jwt] -- [Token scope and access][scope] - - - -[distribution/distribution]: https://distribution.github.io/distribution/ -[specification]: https://github.com/opencontainers/distribution-spec/blob/v1.0.1/spec.md -[OCI image manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md -[Docker image manifest version 2, schema 2]: https://distribution.github.io/distribution/spec/manifest-v2-2/ -[OCI artifacts]: /docker-hub/repos/manage/hub-images/oci-artifacts/ -[oauth2]: https://distribution.github.io/distribution/spec/auth/oauth/ -[jwt]: https://distribution.github.io/distribution/spec/auth/jwt/ -[token]: https://distribution.github.io/distribution/spec/auth/token/ -[scope]: https://distribution.github.io/distribution/spec/auth/scope/ diff --git a/content/manuals/retired.md b/content/manuals/retired.md new file mode 100644 index 000000000000..c99eec578f82 --- /dev/null +++ b/content/manuals/retired.md @@ -0,0 +1,196 @@ +--- +title: Deprecated and retired Docker products and features +linkTitle: Deprecated products and features +description: | + Explore deprecated and retired Docker features, products, and open source + projects, including details on transitioned tools and archived initiatives. +params: + sidebar: + group: Products +aliases: + - /cloud/ + - /cloud/aci-compose-features/ + - /cloud/aci-container-features/ + - /cloud/aci-integration/ + - /cloud/ecs-architecture/ + - /cloud/ecs-compose-examples/ + - /cloud/ecs-compose-features/ + - /cloud/ecs-integration/ + - /engine/context/aci-integration/ + - /engine/context/ecs-integration/ + - /machine/ + - /machine/drivers/hyper-v/ + - /machine/get-started/ + - /machine/install-machine/ + - /machine/overview/ + - /registry/ + - /registry/compatibility/ + - /registry/configuration/ + - /registry/deploying/ + - /registry/deprecated/ + - /registry/garbage-collection/ + - /registry/help/ + - /registry/insecure/ + - /registry/introduction/ + - /registry/notifications/ + - /registry/recipes/ + - /registry/recipes/apache/ + - /registry/recipes/nginx/ + - /registry/recipes/osx-setup-guide/ + - /registry/spec/ + - /registry/spec/api/ + - /registry/spec/auth/ + - /registry/spec/auth/jwt/ + - /registry/spec/auth/oauth/ + - /registry/spec/auth/scope/ + - /registry/spec/auth/token/ + - /registry/spec/deprecated-schema-v1/ + - /registry/spec/implementations/ + - /registry/spec/json/ + - /registry/spec/manifest-v2-1/ + - /registry/spec/manifest-v2-2/ + - /registry/spec/menu/ + - /registry/storage-drivers/ + - /registry/storage-drivers/azure/ + - /registry/storage-drivers/filesystem/ + - /registry/storage-drivers/gcs/ + - /registry/storage-drivers/inmemory/ + - /registry/storage-drivers/oss/ + - /registry/storage-drivers/s3/ + - /registry/storage-drivers/swift/ + - /toolbox/ + - /toolbox/overview/ + - /toolbox/toolbox_install_mac/ + - /toolbox/toolbox_install_windows/ + - /desktop/features/dev-environments/ + - /desktop/features/dev-environments/create-dev-env/ + - /desktop/features/dev-environments/set-up/ + - /desktop/features/dev-environments/share/ + - /desktop/features/dev-environments/dev-cli/ +--- + +This document provides an overview of Docker features, products, and +open-source projects that have been deprecated, retired, or transitioned. 
+ +> [!NOTE] +> +> This page does not cover deprecated and removed Docker Engine features. +> For a detailed list of deprecated Docker Engine features, refer to the +> [Docker Engine Deprecated Features documentation](/manuals/engine/deprecated.md). + +## Products and features + +Support for these deprecated or retired features is no longer provided by +Docker, Inc. The projects that have been transitioned to third parties continue +to receive updates from their new maintainers. + +### Docker Machine + +Docker Machine was a tool for provisioning and managing Docker hosts across +various platforms, including virtual machines and cloud providers. It is no +longer maintained, and users are encouraged to use [Docker Desktop](/manuals/desktop/_index.md) +or [Docker Engine](/manuals/engine/_index.md) directly on supported platforms. +Machine's approach to creating and configuring hosts has been superseded by +more modern workflows that integrate more closely with Docker Desktop. + +### Docker Toolbox + +Docker Toolbox was used on older systems where Docker Desktop could not run. It +bundled Docker Machine, Docker Engine, and Docker Compose into a single +installer. Toolbox is no longer maintained and is effectively replaced by +[Docker Desktop](/manuals/desktop/_index.md) on current systems. References to +Docker Toolbox occasionally appear in older documentation or community +tutorials, but it is not recommended for new installations. + +### Docker Cloud integrations + +Docker previously offered integrations for Amazon's Elastic Container Service +(ECS) and Azure Container Instances (ACI) to streamline container workflows. +These integrations have been deprecated, and users should now rely on native +cloud tools or third-party solutions to manage their workloads. The move toward +platform-specific or universal orchestration tools reduced the need for +specialized Docker Cloud integrations. + +You can still view the relevant documentation for these integrations in the +[Compose CLI repository](https://github.com/docker-archive/compose-cli/tree/main/docs). + +### Docker Enterprise Edition + +Docker Enterprise Edition (EE) was Docker's commercial platform for deploying +and managing large-scale container environments. It was acquired by Mirantis in +2019, and users looking for enterprise-level functionality can now explore +Mirantis Kubernetes Engine or other products offered by Mirantis. Much of the +technology and features found in Docker EE have been absorbed into the Mirantis +product line. + +> [!NOTE] +> For information about enterprise-level features offered by Docker today, +> see the [Docker Business subscription](/manuals/subscription/details.md#docker-business). + +### Docker Data Center and Docker Trusted Registry + +Docker Data Center (DDC) was an umbrella term that encompassed Docker Universal +Control Plane (UCP) and Docker Trusted Registry (DTR). These components +provided a full-stack solution for managing containers, security, and registry +services in enterprise environments. They are now under the Mirantis portfolio +following the Docker Enterprise acquisition. Users still encountering +references to DDC, UCP, or DTR should refer to Mirantis's documentation for +guidance on modern equivalents. + +### Dev Environments + +Dev Environments was a feature introduced in Docker Desktop that allowed +developers to spin up development environments quickly. It was deprecated and removed from Docker Desktop version 4.42 and later. 
Similar workflows can be achieved through +Docker Compose or by creating custom configurations tailored to specific +project requirements. + +## Open source projects + +Several open-source projects originally maintained by Docker have been +archived, discontinued, or transitioned to other maintainers or organizations. + +### Registry (now CNCF Distribution) + +The Docker Registry served as the open-source implementation of a container +image registry. It was donated to the Cloud Native Computing Foundation (CNCF) +in 2019 and is maintained under the name "Distribution." It remains a +cornerstone for managing and distributing container images. + +[CNCF Distribution](https://github.com/distribution/distribution) + +### Docker Compose v1 (replaced by Compose v2) + +Docker Compose v1 (`docker-compose`), a Python-based tool for defining +multi-container applications, has been superseded by Compose v2 (`docker +compose`), which is written in Go and integrates with the Docker CLI. Compose +v1 is no longer maintained, and users should migrate to Compose v2. + +[Compose v2 Documentation](/manuals/compose/_index.md) + +### InfraKit + +InfraKit was an open-source toolkit designed to manage declarative +infrastructure and automate container deployments. It has been archived, and +users are encouraged to explore tools such as Terraform for infrastructure +provisioning and orchestration. + +[InfraKit GitHub Repository](https://github.com/docker/infrakit) + +### Docker Notary (now CNCF Notary) + +Docker Notary was a system for signing and verifying the authenticity of +container content. It was donated to the CNCF in 2017 and continues to be +developed as "Notary." Users seeking secure content verification should consult +the CNCF Notary project. + +[CNCF Notary](https://github.com/notaryproject/notary) + +### SwarmKit + +SwarmKit powers Docker Swarm mode by providing orchestration for container +deployments. While Swarm mode remains functional, development has slowed in +favor of Kubernetes-based solutions. Individuals evaluating container +orchestration options should investigate whether SwarmKit meets modern workload +requirements. + +[SwarmKit GitHub Repository](https://github.com/docker/swarmkit) diff --git a/content/manuals/scout/_index.md b/content/manuals/scout/_index.md index a7e3c3b3cd69..6d7dbb2f8cba 100644 --- a/content/manuals/scout/_index.md +++ b/content/manuals/scout/_index.md @@ -40,7 +40,7 @@ grid: - title: Upgrade link: /subscription/change/ description: | - The free plan includes up to 1 repository. Upgrade for more. + A Personal subscription includes up to 1 repository. Upgrade for more. icon: upgrade --- diff --git a/content/manuals/scout/explore/dashboard.md b/content/manuals/scout/explore/dashboard.md index af6d75e4e658..2e1c666e3b67 100644 --- a/content/manuals/scout/explore/dashboard.md +++ b/content/manuals/scout/explore/dashboard.md @@ -11,10 +11,10 @@ aliases: The [Docker Scout Dashboard](https://scout.docker.com/) helps you share the analysis of images in an organization with your team. Developers can now see an -overview of their security status across all their images from both Docker Hub -and Artifactory, and get remediation advice at their fingertips. It helps team -members in roles such as security, compliance, and operations to know what -vulnerabilities and issues they need to focus on. +overview of their security status across all their images from Docker Hub, and +get remediation advice at their fingertips. 
It helps team members in roles such +as security, compliance, and operations to know what vulnerabilities and issues +they need to focus on. ## Overview diff --git a/content/manuals/scout/explore/metrics-exporter.md b/content/manuals/scout/explore/metrics-exporter.md index 5426d265a8b1..5a4222e6ad31 100644 --- a/content/manuals/scout/explore/metrics-exporter.md +++ b/content/manuals/scout/explore/metrics-exporter.md @@ -40,7 +40,7 @@ To export metrics from your organization, first make sure your organization is e Then, create a Personal Access Token (PAT) - a secret token that allows the exporter to authenticate with the Docker Scout API. The PAT does not require any specific permissions, but it must be created by a user who is an owner of the Docker organization. -To create a PAT, follow the steps in [Create an access token](/security/for-developers/access-tokens/#create-an-access-token). +To create a PAT, follow the steps in [Create an access token](/security/access-tokens/#create-an-access-token). Once you have created the PAT, store it in a secure location. You will need to provide this token to the exporter when scraping metrics. @@ -108,7 +108,7 @@ alongside Grafana with a pre-configured dashboard to visualize the vulnerability $ cd scout-metrics-exporter/prometheus ``` -2. [Create a Docker access token](/security/for-developers/access-tokens/#create-an-access-token) +2. [Create a Docker access token](/security/access-tokens/#create-an-access-token) and store it in a plain text file at `/prometheus/prometheus/token` under the template directory. ```plaintext {title=token} @@ -241,7 +241,7 @@ and a Datadog site. $ cd scout-metrics-exporter/datadog ``` -2. [Create a Docker access token](/security/for-developers/access-tokens/#create-an-access-token) +2. [Create a Docker access token](/security/access-tokens/#create-an-access-token) and store it in a plain text file at `/datadog/token` under the template directory. ```plaintext {title=token} @@ -347,7 +347,7 @@ To change the scrape interval: ## Revoke an access token If you suspect that your PAT has been compromised or is no longer needed, you can revoke it at any time. -To revoke a PAT, follow the steps in the [Create and manage access tokens](/security/for-developers/access-tokens/#modify-existing-tokens). +To revoke a PAT, follow the steps in the [Create and manage access tokens](/security/access-tokens/#modify-existing-tokens). Revoking a PAT immediately invalidates the token, and prevents Prometheus from scraping metrics using that token. You will need to create a new PAT and update the Prometheus configuration to use the new token. diff --git a/content/manuals/scout/images/release-notes/artifactory-agent.gif b/content/manuals/scout/images/release-notes/artifactory-agent.gif deleted file mode 100644 index eaa7b6c3f9e9..000000000000 Binary files a/content/manuals/scout/images/release-notes/artifactory-agent.gif and /dev/null differ diff --git a/content/manuals/scout/install.md b/content/manuals/scout/install.md index 078f5db791a8..72a20e15113e 100644 --- a/content/manuals/scout/install.md +++ b/content/manuals/scout/install.md @@ -76,7 +76,7 @@ $ sh install-scout.sh 5. Authorize the binary to be executable on macOS: ```console - xattr -d com.apple.quarantine $HOME/.docker/scout/docker-scout. + xattr -d com.apple.quarantine $HOME/.docker/scout/docker-scout ``` 6. 
Add the `scout` subdirectory to your `.docker/config.json` as a plugin directory: diff --git a/content/manuals/scout/integrations/_index.md b/content/manuals/scout/integrations/_index.md index ce945300e096..7916377c8578 100644 --- a/content/manuals/scout/integrations/_index.md +++ b/content/manuals/scout/integrations/_index.md @@ -25,7 +25,6 @@ aren't hosted on Docker Hub. The following container registry integrations are available: -- [Artifactory](./registry/artifactory.md) - [Amazon Elastic Container Registry](./registry/ecr.md) - [Azure Container Registry](./registry/acr.md) diff --git a/content/manuals/scout/integrations/environment/cli.md b/content/manuals/scout/integrations/environment/cli.md index 8ebb44ae347e..1cb0cd66eb76 100644 --- a/content/manuals/scout/integrations/environment/cli.md +++ b/content/manuals/scout/integrations/environment/cli.md @@ -5,7 +5,7 @@ title: Generic environment integration with CLI linkTitle: Generic (CLI) --- -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} You can create a generic environment integration by running the Docker Scout CLI client in your CI workflows. The CLI client is available as a binary on diff --git a/content/manuals/scout/integrations/environment/sysdig.md b/content/manuals/scout/integrations/environment/sysdig.md index c2430eaa0a5b..84415426b2f5 100644 --- a/content/manuals/scout/integrations/environment/sysdig.md +++ b/content/manuals/scout/integrations/environment/sysdig.md @@ -5,7 +5,7 @@ description: Integrate your runtime environments with Docker Scout using Sysdig keywords: scout, sysdig, integration, image analysis, environments, supply chain --- -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} The Sysdig integration enables Docker Scout to automatically detect the images you're using for your running workloads. Activating this integration gives you diff --git a/content/manuals/scout/integrations/registry/artifactory.md b/content/manuals/scout/integrations/registry/artifactory.md deleted file mode 100644 index cc0d1774e5f4..000000000000 --- a/content/manuals/scout/integrations/registry/artifactory.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -description: Integrate JFrog Artifactory and JFrog Container Registry with Docker Scout -keywords: docker scout, jfrog, artifactory, jcr, integration, image analysis, security, cves -title: Integrate Docker Scout with Artifactory -linkTitle: Artifactory -aliases: - - /scout/artifactory/ ---- - -Integrating Docker Scout with JFrog Artifactory lets you run image analysis -automatically on images in Artifactory registries. - -## Local image analysis - -You can analyze Artifactory images for vulnerabilities locally using Docker Desktop or the Docker CLI. You first need to authenticate with JFrog Artifactory using the [`docker login`](/reference/cli/docker/login/) command. For example: - -```bash -docker login {URL} -``` - -> [!TIP] -> -> For cloud-hosted Artifactory you can find the credentials for your Artifactory repository by -> selecting it in the Artifactory UI and then the **Set Me Up** button. - -## Remote image analysis - -To automatically analyze images running in remote environments you need to deploy the Docker Scout Artifactory agent. The agent is a -standalone service that analyzes images and uploads the result to Docker Scout. -You can view the results using the -[Docker Scout Dashboard](https://scout.docker.com/). 
- -### How the agent works - -The Docker Scout Artifactory agent is available as an -[image on Docker Hub](https://hub.docker.com/r/docker/artifactory-agent). The agent works by continuously polling -Artifactory for new images. When it finds a new image, it performs the following -steps: - -1. Pull the image from Artifactory -2. Analyze the image -3. Upload the analysis result to Docker Scout - -The agent records the Software Bill of Materials (SBOM) for the image, and the -SBOMs for all of its base images. The recorded SBOMs include both Operating -System (OS)-level and application-level programs or dependencies that the image -contains. - -Additionally, the agent sends the following metadata about the image to Docker Scout: - -- The source repository URL and commit SHA for the image -- Build instructions -- Build date -- Tags and digest -- Target platforms -- Layer sizes - -The agent never transacts the image -itself, nor any data inside the image, such as code, binaries, and layer blobs. - -The agent doesn't detect and analyze pre-existing images. It only analyzes -images that appear in the registry while the agent is running. - -### Deploy the agent - -This section describes the steps for deploying the Artifactory agent. - -#### Prerequisites - -Before you deploy the agent, ensure that you meet the prerequisites: - -- The server where you host the agent can access the following resources over - the network: - - Your JFrog Artifactory instance - - `hub.docker.com`, port 443, for authenticating with Docker - - `api.dso.docker.com`, port 443, for transacting data to Docker Scout -- The registries are Docker V2 registries. V1 registries aren't supported. - -The agent supports all versions of JFrog Artifactory and JFrog Container -Registry. - -#### Create the configuration file - -You configure the agent using a JSON file. The agent expects the configuration -file to be in `/opt/artifactory-agent/data/config.json` on startup. - -The configuration file includes the following properties: - -| Property | Description | -| --------------------------- | ------------------------------------------------------------------------------- | -| `agent_id` | Unique identifier for the agent. | -| `docker.organization_name` | Name of the Docker organization. | -| `docker.username` | Username of the admin user in the Docker organization. | -| `docker.pat` | Personal access token of the admin user with read and write permissions. | -| `artifactory.base_url` | Base URL of the Artifactory instance. | -| `artifactory.username` | Username of the Artifactory user with read permissions that the agent will use. | -| `artifactory.password` | Password or API token for the Artifactory user. | -| `artifactory.image_filters` | Optional: List of repositories and images to analyze. | - -If you don't specify any repositories in `artifactory.image_filters`, the agent -runs image analysis on all images in your Artifactory instance. 
- -The following snippet shows a sample configuration: - -```json -{ - "agent_id": "acme-prod-agent", - "docker": { - "organization_name": "acme", - "username": "mobythewhale", - "pat": "dckr_pat__dsaCAs_xL3kNyupAa7dwO1alwg" - }, - "artifactory": [ - { - "base_url": "https://acme.jfrog.io", - "username": "acmeagent", - "password": "hayKMvFKkFp42RAwKz2K", - "image_filters": [ - { - "repository": "dev-local", - "images": ["internal/repo1", "internal/repo2"] - }, - { - "repository": "prod-local", - "images": ["staging/repo1", "prod/repo1"] - } - ] - } - ] -} -``` - -Create a configuration file and save it somewhere on the server where you plan -to run the agent. For example, `/var/opt/artifactory-agent/config.json`. - -#### Run the agent - -The following example shows how to run the Docker Scout Artifactory agent using -`docker run`. This command creates a bind mount for the directory containing the -JSON configuration file created earlier at `/opt/artifactory-agent/data` inside -the container. Make sure the mount path you use is the directory containing the -`config.json` file. - - -> [!IMPORTANT] -> -> Use the `v1` tag of the Artifactory agent image. Don't use the `latest` tag as -> doing so may incur breaking changes. - -```console -$ docker run \ - --mount type=bind,src=/var/opt/artifactory-agent,target=/opt/artifactory-agent/data \ - docker/artifactory-agent:v1 -``` - -#### Analyzing pre-existing data - -By default the agent detects and analyzes images as they're created and -updated. If you want to use the agent to analyze pre-existing images, you -can use backfill mode. Use the `--backfill-from=TIME` command line option, -where `TIME` is an ISO 8601 formatted time, to run the agent in backfill mode. -If you use this option, the agent analyzes all images pushed between that -time and the current time when the agent starts, then exits. - -For example: - -```console -$ docker run \ - --mount type=bind,src=/var/opt/artifactory-agent,target=/opt/artifactory-agent/data \ - docker/artifactory-agent:v1 --backfill-from=2022-04-10T10:00:00Z -``` - -When running a backfill multiple times, the agent won't analyze images that -it's already analyzed. To force re-analysis, provide the `--force` command -line flag. - -### View analysis results - -You can view the image analysis results in the Docker Scout Dashboard. - -1. Go to [Images page](https://scout.docker.com/reports/images/) in the Docker Scout Dashboard. - - This page displays the Docker Scout-enabled repositories in your organization. - -2. Select the image in the list. -3. Select the tag. - -When you have selected a tag, you're taken to the vulnerability report for that -tag. Here, you can select if you want to view all vulnerabilities in the image, -or vulnerabilities introduced in a specific layer. You can also filter -vulnerabilities by severity, and whether or not there's a fix version available. diff --git a/content/manuals/scout/quickstart.md b/content/manuals/scout/quickstart.md index 86d58151ea4b..89ccd30cbdea 100644 --- a/content/manuals/scout/quickstart.md +++ b/content/manuals/scout/quickstart.md @@ -84,10 +84,9 @@ Learn more about the `docker scout cves` command in the ## Step 4: Fix application vulnerabilities -The fix suggested by Docker Scout is to update -the underlying vulnerable express version to 4.17.3 or later. +After the Docker Scout analysis, a high vulnerability CVE-2022-24999 was found, caused by an outdated version of the **express** package. -1. Update the `package.json` file with the new package version. 
+Version 4.17.3 of the express package fixes the vulnerability. Therefore, update the `package.json` file to the new version:

   ```diff
     "dependencies": {
@@ -95,15 +94,14 @@ the underlying vulnerable express version to 4.17.3 or later.
+      "express": "4.17.3"
     }
   ```
-
-2. Rebuild the image with a new tag and push it to your Docker Hub repository:
+
+Rebuild the image with a new tag and push it to your Docker Hub repository:

   ```console
   $ docker build --push -t <ORG_NAME>/scout-demo:v2 .
   ```

-Now, viewing the latest tag of the image in Docker Desktop, the Docker Scout
-Dashboard, or CLI, you can see that you have fixed the vulnerability.
+Run the `docker scout` command again and verify that HIGH CVE-2022-24999 is no longer present:

```console
$ docker scout cves --only-package express
@@ -154,7 +152,7 @@ $ docker scout config organization <ORG_NAME>

Now you can run the `quickview` command to get an overview of the compliance
status for the image you just built.
-The image is evaluated against the default policy configurations.
+The image is evaluated against the default policy configurations. You'll see output similar to the following:

```console
$ docker scout quickview
@@ -209,7 +207,7 @@ The classic image store doesn't support manifest lists, which is how the
provenance attestations are attached to an image.

Open **Settings** in Docker Desktop. Under the **General** section, make sure
-that the **Use containerd for pulling and storing images** option is checked.
+that the **Use containerd for pulling and storing images** option is checked, then select **Apply**.

Note that changing image stores temporarily hides images and containers of the
inactive image store until you switch back.

@@ -230,7 +228,9 @@ results through a different lens: the Docker Scout Dashboard.

3. Select **Images** in the left-hand navigation.

The images page lists your Scout-enabled repositories.
-Select the image in the list to open the **Image details** sidebar.
+
+Select anywhere in the row for the image you want to view, except on a link, to open the **Image details** sidebar.
+
The sidebar shows a compliance overview for the last pushed tag of a repository.

> [!NOTE]
@@ -239,13 +239,15 @@ The sidebar shows a compliance overview for the last pushed tag of a repository.
> It might take a few minutes before the results appear if this is your
> first time using the Docker Scout Dashboard.

-Inspect the **Up-to-Date Base Images** policy.
+Go back to the image list and select the image version, available in the **Most recent image** column.
+Then, at the top right of the page, select the **Update base image** button to inspect the policy.
+
This policy checks whether base images you use are up-to-date. It currently has
a non-compliant status, because the example image uses an old version `alpine`
as a base image.

-Select the **View fix** button next to the policy name for details about the violation,
-and recommendations on how to address it.
+Close the **Recommended fixes for base image** modal. In the policy listing, select the **View fixes** button next to the policy name for details about the violation and recommendations on how to address it.
+
In this case, the recommended action is to enable [Docker Scout's GitHub
integration](./integrations/source-code-management/github.md), which helps keep
your base images up-to-date automatically.
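
To tie the quickstart's CLI steps together, here is a minimal sketch of the rebuild-and-recheck loop described above. It assumes `<ORG_NAME>` is your Docker Hub namespace; the comments describe expected behavior, not exact output.

```console
# Rebuild the patched image and push it under a new tag
$ docker build --push -t <ORG_NAME>/scout-demo:v2 .

# Re-run the CVE check scoped to the express package;
# once the fix is in place, CVE-2022-24999 should no longer be reported
$ docker scout cves --only-package express <ORG_NAME>/scout-demo:v2
```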
diff --git a/content/manuals/scout/release-notes/cli.md b/content/manuals/scout/release-notes/cli.md index 1ed667573f57..353c35b8e1d0 100644 --- a/content/manuals/scout/release-notes/cli.md +++ b/content/manuals/scout/release-notes/cli.md @@ -410,7 +410,7 @@ Discarded in favor of [1.9.1](#191). instance by Docker Desktop there's no need anymore to re-index it on WSL2 side. - Indexing is now blocked in the CLI if it has been disabled using - [Settings Management](/manuals/security/for-admins/hardened-desktop/settings-management/_index.md) feature. + [Settings Management](/manuals/enterprise/security/hardened-desktop/settings-management/_index.md) feature. - Fix a panic that would occur when analyzing a single-image `oci-dir` input - Improve local attestation support with the containerd image store diff --git a/content/manuals/scout/release-notes/platform.md b/content/manuals/scout/release-notes/platform.md index 2ec1677b0cf5..c794f98ce80c 100644 --- a/content/manuals/scout/release-notes/platform.md +++ b/content/manuals/scout/release-notes/platform.md @@ -15,9 +15,6 @@ issues, and bug fixes in Docker Scout releases. These release notes cover the Docker Scout platform, including the Dashboard. For CLI release notes, refer to [Docker Scout CLI release notes](./cli.md). -Take a look at the [Docker Public Roadmap](https://github.com/docker/roadmap/projects/1) -for what's coming next. - ## Q4 2024 New features and enhancements released in the fourth quarter of 2024. @@ -300,12 +297,9 @@ documentation](../integrations/environment/sysdig.md). The new JFrog Artifactory integration enables automatic image analysis on Artifactory registries. -![Animation of how to integrate Artifactory](../images/release-notes/artifactory-agent.gif) - The integration involves deploying a Docker Scout Artifactory agent that polls for new images, performs analysis, and uploads results to Docker Scout, all -while preserving the integrity of image data. Learn more in the [Artifactory -integration documentation](../integrations/registry/artifactory.md) +while preserving the integrity of image data. #### Known limitations diff --git a/content/manuals/security/for-developers/2fa/_index.md b/content/manuals/security/2fa/_index.md similarity index 94% rename from content/manuals/security/for-developers/2fa/_index.md rename to content/manuals/security/2fa/_index.md index 70c17401f3b5..3008cfab1df3 100644 --- a/content/manuals/security/for-developers/2fa/_index.md +++ b/content/manuals/security/2fa/_index.md @@ -5,7 +5,8 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa title: Enable two-factor authentication for your Docker account linkTitle: Two-factor authentication aliases: -- /docker-hub/2fa/ + - /docker-hub/2fa/ + - /security/for-developers/2fa/ --- Two-factor authentication adds an extra layer of security to your Docker @@ -27,7 +28,7 @@ Authenticator with a registered YubiKey. 1. Sign in to your [Docker account](https://app.docker.com/login). 2. Select your avatar and then from the drop-down menu, select **Account settings**. -3. Navigate to the **Security** section, then select **Two-factor authentication**. +3. Select **2FA**. 4. Enter your account password, then select **Confirm**. 5. Save your recovery code and store it somewhere safe. You can use your recovery code to recover your account in the event you lose access to your authenticator app. 6. Use a Time-based One-time password (TOTP) mobile app to scan the QR code or enter the text code. 
diff --git a/content/manuals/security/for-developers/2fa/disable-2fa.md b/content/manuals/security/2fa/disable-2fa.md similarity index 85% rename from content/manuals/security/for-developers/2fa/disable-2fa.md rename to content/manuals/security/2fa/disable-2fa.md index a0fbb74027a9..c3e2ab06c21e 100644 --- a/content/manuals/security/for-developers/2fa/disable-2fa.md +++ b/content/manuals/security/2fa/disable-2fa.md @@ -5,7 +5,8 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa title: Disable two-factor authentication on your Docker account linkTitle: Disable two-factor authentication aliases: -- /docker-hub/2fa/disable-2fa/ + - /docker-hub/2fa/disable-2fa/ + - /security/for-developers/2fa/disable-2fa/ weight: 30 --- @@ -16,6 +17,6 @@ weight: 30 1. Sign in to your [Docker account](https://app.docker.com/login). 2. Select your avatar and then from the drop-down menu, select **Account settings**. -3. Navigate to the **Security** section, then select **Two-factor authentication**. +3. Select **2FA**. 4. Enter your password, then select **Confirm**. 5. Select **Disable 2FA**. diff --git a/content/manuals/security/for-developers/2fa/new-recovery-code.md b/content/manuals/security/2fa/new-recovery-code.md similarity index 85% rename from content/manuals/security/for-developers/2fa/new-recovery-code.md rename to content/manuals/security/2fa/new-recovery-code.md index e608ff55147b..1ec904930ec9 100644 --- a/content/manuals/security/for-developers/2fa/new-recovery-code.md +++ b/content/manuals/security/2fa/new-recovery-code.md @@ -4,7 +4,8 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa authentication, account security title: Generate a new recovery code aliases: -- /docker-hub/2fa/new-recovery-code/ + - /docker-hub/2fa/new-recovery-code/ + - /security/for-developers/2fa/new-recovery-code/ weight: 10 --- @@ -13,7 +14,7 @@ access to your Docker Hub account, you can generate a new recovery code. 1. Sign in to your [Docker account](https://app.docker.com/login). 2. Select your avatar and then from the drop-down menu, select **Account settings**. -3. Navigate to the **Security** section, then select **Manage Two-Factor Authentication**. +3. Select **2FA**. 4. Enter your password, then select **Confirm**. 5. Select **Generate new code**. 
diff --git a/content/manuals/security/for-developers/2fa/recover-hub-account.md b/content/manuals/security/2fa/recover-hub-account.md similarity index 88% rename from content/manuals/security/for-developers/2fa/recover-hub-account.md rename to content/manuals/security/2fa/recover-hub-account.md index e8424ccd43a4..d4c9d7925284 100644 --- a/content/manuals/security/for-developers/2fa/recover-hub-account.md +++ b/content/manuals/security/2fa/recover-hub-account.md @@ -4,7 +4,8 @@ keywords: Docker, docker, registry, security, Docker Hub, authentication, two-fa authentication title: Recover your Docker account aliases: -- /docker-hub/2fa/recover-hub-account/ + - /docker-hub/2fa/recover-hub-account/ + - /security/for-developers/2fa/recover-hub-account/ weight: 20 --- diff --git a/content/manuals/security/_index.md b/content/manuals/security/_index.md index 1d2ecf3b2403..798d18d796bd 100644 --- a/content/manuals/security/_index.md +++ b/content/manuals/security/_index.md @@ -1,69 +1,21 @@ --- -title: Security -description: Learn about security features Docker has to offer and explore best practices +title: Security for developers +linkTitle: Security +description: Learn about developer-level security features Docker has to offer and explore best practices keywords: docker, docker hub, docker desktop, security weight: 40 params: sidebar: group: Platform -grid_admins: -- title: Settings Management - description: Learn how Settings Management can secure your developers' workflows. - icon: shield_locked - link: /security/for-admins/hardened-desktop/settings-management/ -- title: Enhanced Container Isolation - description: Understand how Enhanced Container Isolation can prevent container attacks. - icon: security - link: /security/for-admins/hardened-desktop/enhanced-container-isolation/ -- title: Registry Access Management - description: Control the registries developers can access while using Docker Desktop. - icon: home_storage - link: /security/for-admins/hardened-desktop/registry-access-management/ -- title: Image Access Management - description: Control the images developers can pull from Docker Hub. - icon: photo_library - link: /security/for-admins/hardened-desktop/image-access-management/ -- title: "Air-Gapped Containers" - description: Restrict containers from accessing unwanted network resources. - icon: "vpn_lock" - link: /security/for-admins/hardened-desktop/air-gapped-containers/ -- title: Enforce sign-in - description: Configure sign-in for members of your teams and organizations. - link: /security/for-admins/enforce-sign-in/ - icon: passkey -- title: Domain audit - description: Identify uncaptured users in your organization. - link: /security/for-admins/domain-audit/ - icon: person_search -- title: Docker Scout - description: Explore how Docker Scout can help you create a more secure software supply chain. - icon: query_stats - link: /scout/ -- title: SSO - description: Learn how to configure SSO for your company or organization. - icon: key - link: /security/for-admins/single-sign-on/ -- title: SCIM - description: Set up SCIM to automatically provision and deprovision users. - icon: checklist - link: /security/for-admins/provisioning/scim/ -- title: Roles and permissions - description: Assign roles to individuals giving them different permissions within an organization. 
- icon: badge - link: /security/for-admins/roles-and-permissions/ -- title: Private marketplace for Extensions (Beta) - description: Learn how to configure and set up a private marketplace with a curated list of extensions for your Docker Desktop users. - icon: storefront - link: /desktop/extensions/private-marketplace/ grid_developers: - title: Set up two-factor authentication description: Add an extra layer of authentication to your Docker account. - link: /security/for-developers/2fa/ + link: /security/2fa/ icon: phonelink_lock - title: Manage access tokens description: Create personal access tokens as an alternative to your password. icon: password - link: /security/for-developers/access-tokens/ + link: /security/access-tokens/ - title: Static vulnerability scanning description: Automatically run a point-in-time scan on your Docker images for vulnerabilities. icon: image_search @@ -98,12 +50,6 @@ scale, manage, and secure your instances of Docker Desktop with DevOps security For both administrators and developers, Docker provides security-specific products such as Docker Scout, for securing your software supply chain with proactive image vulnerability monitoring and remediation strategies. -## For administrators - -Explore the security features Docker offers to satisfy your company's security policies. - -{{< grid items="grid_admins" >}} - ## For developers See how you can protect your local environments, infrastructure, and networks without impeding productivity. diff --git a/content/manuals/security/for-developers/access-tokens.md b/content/manuals/security/access-tokens.md similarity index 75% rename from content/manuals/security/for-developers/access-tokens.md rename to content/manuals/security/access-tokens.md index bda7607ba60f..503491451734 100644 --- a/content/manuals/security/for-developers/access-tokens.md +++ b/content/manuals/security/access-tokens.md @@ -5,7 +5,8 @@ description: Learn how to create and manage your personal Docker access tokens to securely push and pull images programmatically. keywords: docker hub, hub, security, PAT, personal access token aliases: -- /docker-hub/access-tokens/ + - /docker-hub/access-tokens/ + - /security/for-developers/access-tokens/ --- You can create a personal access token (PAT) to use as an alternative to your password for Docker CLI authentication. @@ -25,25 +26,18 @@ any time. Use the Docker Admin Console to create an access token. -1. Sign in to your [Docker account](https://app.docker.com/login). - -2. Select your avatar in the top-right corner and from the drop-down menu select **Account settings**. - -3. In the **Security** section, select **Personal access tokens**. - -4. Select **Generate new token**. - -5. Add a description for your token. Use something that indicates the use case or purpose of the token. - -6. Select the expiration date for the token. - -7. Set the access permissions. +1. Sign in to [Docker Home](https://app.docker.com/). +1. Select your avatar in the top-right corner and from the drop-down menu select **Account settings**. +1. Select **Personal access tokens**. +1. Select **Generate new token**. +1. Add a description for your token. Use something that indicates the use case or purpose of the token. +1. Select the expiration date for the token. +1. Set the access permissions. The access permissions are scopes that set restrictions in your repositories. For example, for Read & Write permissions, an automation pipeline can build an image and then push it to a repository. 
However, it can't delete the repository.
-
-8. Select **Generate** and then copy the token that appears on the screen and save it. You won't be able to retrieve the token once you close this prompt.
+1. Select **Generate** and then copy the token that appears on the screen and save it. You won't be able to retrieve the token once you close this prompt.

## Use an access token

@@ -75,17 +69,17 @@ When utilizing PATs, users should be aware that excessive creation of PATs could

You can rename, activate, deactivate, or delete a token as needed. You can
manage your tokens in your account settings.

-1. Sign in to your [Docker account](https://app.docker.com/login).
-
-2. Select your avatar in the top-right corner and from the drop-down menu select **Account settings**.
-
-3. In the **Security** section, select **Personal access tokens**.
-   This page shows an overview of all your tokens, and lists if the token was generated manually or if it was [auto-generated](#auto-generated-tokens). You can also view the number
-   of tokens that are activated and deactivated in the toolbar.
-
-4. Select the actions menu on the far right of a token row, then select **Deactivate**, **Edit**, or **Delete** to modify the token.
-
-5. After modifying the token, select **Save token**.
+1. Sign in to [Docker Home](https://app.docker.com/login).
+1. Select your avatar in the top-right corner and from the drop-down menu select **Account settings**.
+1. Select **Personal access tokens**.
+
+   This page shows an overview of all your
+   tokens, and lists if the token was generated manually or if it was
+   [auto-generated](#auto-generated-tokens). You can also view the scope of the
+   tokens, which tokens are active and inactive, when they were created, when
+   they were last used, and their expiration date.
+1. Select the actions menu on the far right of a token row, then select **Deactivate** or **Activate**, **Edit**, or **Delete** to modify the token.
+1. After editing the token, select **Save token**.

## Auto-generated tokens

diff --git a/content/manuals/security/faqs/containers.md b/content/manuals/security/faqs/containers.md
index 340099d0e2fd..45174ff6dfab 100644
--- a/content/manuals/security/faqs/containers.md
+++ b/content/manuals/security/faqs/containers.md
@@ -29,7 +29,7 @@ However note the following:
   which containers they run with such privileges to avoid security breaches by
   malicious container images.

-* If [Enhanced Container Isolation (ECI)](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md)
+* If [Enhanced Container Isolation (ECI)](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md)
   mode is enabled, then each container runs within a dedicated Linux User
   Namespace inside the Docker Desktop VM, which means the container has no
   privileges within the Docker Desktop VM. Even when using the `--privileged`
diff --git a/content/manuals/security/faqs/general.md b/content/manuals/security/faqs/general.md
index ce6f6dcd1873..54405768457e 100644
--- a/content/manuals/security/faqs/general.md
+++ b/content/manuals/security/faqs/general.md
@@ -31,11 +31,12 @@ You can configure this through SSO using your IdP. Check with your IdP if they s

### How are sessions managed and do they expire?

-Docker Desktop uses tokens to manage sessions after a user signs in. Docker Desktop signs you out after 90 days, or 30 days of inactivity.
+By default, Docker uses tokens to manage sessions after a user signs in:

-In Docker Hub, you need to re-authenticate after 24 hours. If users are authenticating using SSO, the default session timeout for the IdP is respected.
+- Docker Desktop signs you out after 90 days, or 30 days of inactivity.
+- Docker Hub and Docker Home sign you out after 24 hours.

-Custom settings per organization for sessions aren't supported.
+Docker also supports your IdP's default session timeout. You can configure this by setting the `dockerSessionMinutes` SAML attribute. For more information, see [SSO attributes](/manuals/enterprise/security/provisioning/_index.md#sso-attributes).

### How does Docker attribute downloads to us and what data is used to classify or verify the user is part of our organization?

@@ -51,9 +52,9 @@ Some users authenticate by signing in to Docker Desktop and joining their domain

Organizations set up in Docker use verified domains and any team member with an email domain other than what's verified is noted as a "Guest" in that organization.

-### How long are Docker Hub logs available?
+### How long are activity logs available?

-Docker provides various types of audit logs and log retention varies. For example, Docker Hub Activity logs are available for 90 days. You are responsible for exporting logs or setting up drivers to their own internal systems.
+Docker provides various types of audit logs and log retention varies. For example, Docker activity logs are available for 90 days. You are responsible for exporting logs or setting up drivers to your own internal systems.

### Can I export a list of all users with their assigned roles and privileges and if so, in what format?

@@ -69,7 +70,12 @@ This is applicable only when using Docker Hub's application-level password versu

### How do we de-provision users who are not part of our IdP? We use SSO but not SCIM

-If SCIM isn't enabled, you have to manually remove users from the organization in our system. Using SCIM automates this.
+If SCIM isn't enabled, you have to manually remove users from the organization.
+SCIM can automate this if your users are added after SCIM is enabled. Any users
+added to your organization before SCIM is enabled must be removed manually.
+
+For more information on manually removing users, see
+[Manage organization members](/manuals/admin/organization/members.md).

### What metadata is collected from container images that Scout analyzes?

@@ -83,4 +89,4 @@ Extensions are not covered as part of Docker’s Third-Party Risk Management Pro

### Can I disable private repos in my organization via a setting to make sure nobody is pushing images into Docker Hub?

-No. With [Registry Access Management](/manuals/security/for-admins/hardened-desktop/registry-access-management.md) (RAM), administrators can ensure that their developers using Docker Desktop only access allowed registries. This is done through the Registry Access Management dashboard on Docker Hub.
+No. With [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) (RAM), administrators can ensure that their developers using Docker Desktop only access allowed registries. This is done through the Registry Access Management dashboard in the Admin Console.
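
The session-timeout answer above references the `dockerSessionMinutes` SAML attribute. As a minimal sketch, assuming a SAML 2.0 IdP and an illustrative 480-minute (8-hour) value, the attribute statement your IdP sends would look something like this; the exact mapping UI varies by provider.

```xml
<!-- Illustrative only: sets the Docker session length to 480 minutes -->
<saml:AttributeStatement>
  <saml:Attribute Name="dockerSessionMinutes">
    <saml:AttributeValue>480</saml:AttributeValue>
  </saml:Attribute>
</saml:AttributeStatement>
```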
diff --git a/content/manuals/security/faqs/single-sign-on/domain-faqs.md b/content/manuals/security/faqs/single-sign-on/domain-faqs.md
index 1946068befcc..6f88ada90c50 100644
--- a/content/manuals/security/faqs/single-sign-on/domain-faqs.md
+++ b/content/manuals/security/faqs/single-sign-on/domain-faqs.md
@@ -1,7 +1,8 @@
---
description: Single sign-on domain FAQs
keywords: Docker, Docker Hub, SSO FAQs, single sign-on, domains, domain verification, domain management
-title: FAQS on SSO and domains
+title: FAQs for SSO and domains
+linkTitle: Domains
tags: [FAQ]
aliases:
- /single-sign-on/domain-faqs/
@@ -18,8 +19,12 @@ You can do it one time to add the domain to a connection. If your organization e

### Is adding domain required to configure SSO? What domains should I be adding? And how do I add it?

-Adding and verifying a domain is required to enable and enforce SSO. See [Configure single sign-on](/manuals/security/for-admins/single-sign-on/configure.md) for more information. This should include all email domains users will use to access Docker. Public domains, for example `gmail.com` or `outlook.com`, are not permitted. Also, the email domain should be set as the primary email.
+Adding and verifying a domain is required to enable and enforce SSO. See [Configure single sign-on](/manuals/enterprise/security/single-sign-on/configure.md) for more information. This should include all email domains users will use to access Docker. Public domains, for example `gmail.com` or `outlook.com`, are not permitted. Also, the email domain should be set as the primary email.

### Is IdP-initiated authentication supported?

IdP-initiated authentication isn't supported by Docker SSO. Users must initiate sign-in through Docker Desktop or Hub.
+
+### Can I verify the same domain on multiple organizations?
+
+You can't verify the same domain for multiple organizations at the organization level. If you want to verify one domain for multiple organizations, you must have a Docker Business subscription, and [create a company](/manuals/admin/company/new-company.md). A company enables centralized management of organizations and allows domain verification at the company level.
diff --git a/content/manuals/security/faqs/single-sign-on/enforcement-faqs.md b/content/manuals/security/faqs/single-sign-on/enforcement-faqs.md
index 11d31eeecfa3..20528c61e89f 100644
--- a/content/manuals/security/faqs/single-sign-on/enforcement-faqs.md
+++ b/content/manuals/security/faqs/single-sign-on/enforcement-faqs.md
@@ -1,21 +1,18 @@
---
description: Single sign-on enforcement FAQs
keywords: Docker, Docker Hub, SSO FAQs, single sign-on, enforce SSO, SSO enforcement
-title: FAQs on SSO and enforcement
+title: FAQs for SSO and enforcement
+linkTitle: Enforcement
tags: [FAQ]
aliases:
- /single-sign-on/enforcement-faqs/
- /faq/security/single-sign-on/enforcement-faqs/
---

-### We currently have a Docker Team subscription. How do we enable SSO?
+### I currently have a Docker Team subscription. How do I enable SSO?

SSO is available with a Docker Business subscription. To enable SSO, you must first upgrade your subscription to a Docker Business subscription. To learn how to upgrade your existing account, see [Upgrade your subscription](../../../subscription/change.md).

-### How do service accounts work with SSO?
-
-Service accounts work like any other user when SSO is turned on. If the service account is using an email for a domain with SSO turned on, it needs a PAT for CLI and API usage.
-
### Is DNS verification required to enable SSO?

Yes.
You must verify a domain before using it with an SSO connection. @@ -24,11 +21,11 @@ Yes. You must verify a domain before using it with an SSO connection. When SSO is enforced, [passwords are prevented from accessing the Docker CLI](/security/security-announcements/#deprecation-of-password-logins-on-cli-when-sso-enforced). You can still access the Docker CLI using a personal access token (PAT) for authentication. -Each user must create a PAT to access the CLI. To learn how to create a PAT, see [Manage access tokens](/security/for-developers/access-tokens/). Users who already used a PAT to sign in before SSO enforcement will still be able to use that PAT to authenticate. +Each user must create a PAT to access the CLI. To learn how to create a PAT, see [Manage access tokens](/security/access-tokens/). Users who already used a PAT to sign in before SSO enforcement will still be able to use that PAT to authenticate. -### How does SSO affect our automation systems and CI/CD pipelines? +### How does SSO affect automation systems and CI/CD pipelines? -Before enforcing SSO, you must create PATs for automation systems and CI/CD pipelines and use the tokens instead of a password. +Before enforcing SSO, you must [create PATs](/security/access-tokens/). These PATs are used instead of passwords for signing into automation systems and CI/CD pipelines. ### What can organization users who authenticated with personal emails prior to enforcement expect? @@ -38,26 +35,17 @@ Ensure your users have their organization email on their account, so that the ac Yes, you can choose to not enforce, and users have the option to use either Docker ID (standard email and password) or domain-verified email address (SSO) at the sign-in screen. -### SSO is enforced, but one of our users is able to sign in through username and password. Why is this happening? +### SSO is enforced, but a user can sign in using a username and password. Why is this happening? Guest users who are not part of your registered domain but have been invited to your organization do not sign in through your SSO Identity Provider. SSO enforcement only requires that users which do belong to your domain, must go through the SSO IdP. ### Is there a way to test this functionality in a test tenant with Okta before going to production? -Yes, you can create a test organization. Companies can set up a new 5 seat Business plan on a new organization to test with (making sure to only enable SSO, not enforce it or all domain email users will be forced to sign in to that test tenant). - -### Once we enable SSO for Docker Desktop, what's the impact to the flow for Build systems that use service accounts? - -If you enable SSO, there is no impact. Both username/password or personal access token (PAT) sign-in are supported. -However, if you enforce SSO: - -- Service Account domain email addresses must not be aliased and must be enabled in their IdP -- Username/password authentication [won’t work](/security/security-announcements/#deprecation-of-password-logins-on-cli-when-sso-enforced), so you should update the build system to use a PAT instead of a password -- Those who know the IdP credentials can sign in as that Service Account through SSO on Hub and create or change the personal access token for that service account. +Yes, you can create a test organization. Companies can set up a new 5 seat Business subscription on a new organization to test with. 
To do this, make sure to only enable SSO, not enforce it, or all domain email users will be forced to sign in to that test tenant.

### Is the sign in required tracking at runtime or install time?

-At runtime for Docker Desktop if it’s configured to require authentication to the organization.
+For Docker Desktop, if it's configured to require authentication to the organization, sign-in is tracked at runtime.

### What is enforcing SSO versus enforcing sign-in?

@@ -65,7 +53,5 @@ Enforcing SSO and enforcing sign-in to Docker Desktop are different features tha

Enforcing SSO ensures that users sign in using their SSO credentials instead of their Docker ID. One of the benefits is that SSO enables you to better manage user credentials.

-Enforcing sign-in to Docker Desktop ensures that users always sign in to an
-
-account that's a member of your organization. The benefits are that your organization's security settings are always applied to the user's session and your users always receive the benefits of your subscription. For more details, see [Enforce sign-in for Desktop](../../../security/for-admins/enforce-sign-in/_index.md).
+Enforcing sign-in to Docker Desktop ensures that users always sign in to an account that's a member of your organization. The benefits are that your organization's security settings are always applied to the user's session and your users always receive the benefits of your subscription. For more details, see [Enforce sign-in for Desktop](/manuals/enterprise/security/enforce-sign-in/_index.md#enforcing-sign-in-versus-enforcing-single-sign-on-sso).
diff --git a/content/manuals/security/faqs/single-sign-on/faqs.md b/content/manuals/security/faqs/single-sign-on/faqs.md
index 383ee8df8aa8..b59f5863c1bf 100644
--- a/content/manuals/security/faqs/single-sign-on/faqs.md
+++ b/content/manuals/security/faqs/single-sign-on/faqs.md
@@ -27,7 +27,7 @@ Docker supports Service Provider Initiated (SP-initiated) SSO flow. This means u

### Where can I find detailed instructions on how to configure Docker SSO?

-You first need to establish an SSO connection with your identity provider, and the company email domain needs to be verified prior to establishing an SSO connection for your users. For detailed step-by-step instructions on how to configure Docker SSO, see [Single Sign-on](../../../security/for-admins/single-sign-on/configure/_index.md).
+You first need to establish an SSO connection with your identity provider, and the company email domain needs to be verified prior to establishing an SSO connection for your users. For detailed step-by-step instructions on how to configure Docker SSO, see [Single Sign-on](/manuals/enterprise/security/single-sign-on/configure.md).

### Does Docker SSO support multi-factor authentication (MFA)?

@@ -57,8 +57,13 @@ Directory.Read.All permission, which provides access to all users, groups, and
other sensitive data in the directory. Due to potential security risks, Docker
doesn't support this configuration. Instead, Docker recommends [configuring
SCIM to enable group sync
-securely](/security/for-admins/provisioning/group-mapping/#use-group-mapping-with-scim).
+securely](/manuals/enterprise/security/provisioning/group-mapping.md#use-group-mapping-with-scim).

### Are there any firewall rules required for SSO configuration?

-No. There are no specific firewall rules required for configuring SSO, as long as the domain `login.docker.com` is accessible. This domain is commonly accessible by default.
However, in rare cases, some organizations may have firewall restrictions in place that block this domain. If you encounter issues during SSO setup, ensure that `login.docker.com` is allowed in your network's firewall settings. \ No newline at end of file +No. There are no specific firewall rules required for configuring SSO, as long as the domain `login.docker.com` is accessible. This domain is commonly accessible by default. However, in rare cases, some organizations may have firewall restrictions in place that block this domain. If you encounter issues during SSO setup, ensure that `login.docker.com` is allowed in your network's firewall settings. + +### Does Docker use my IdP's default session timeout? + +Yes, Docker supports your IdP's default session timeout using a custom SAML attribute. +Instead of relying on the standard `SessionNotOnOrAfter` element from the SAML spec, Docker uses a custom `dockerSessionMinutes` attribute to control session duration. See [SSO attributes](/manuals/enterprise/security/provisioning/_index.md#sso-attributes) for more information. \ No newline at end of file diff --git a/content/manuals/security/faqs/single-sign-on/idp-faqs.md b/content/manuals/security/faqs/single-sign-on/idp-faqs.md index 01aff89b9b17..b4d5f8c899c4 100644 --- a/content/manuals/security/faqs/single-sign-on/idp-faqs.md +++ b/content/manuals/security/faqs/single-sign-on/idp-faqs.md @@ -1,7 +1,8 @@ --- description: Single sign-on IdP FAQs keywords: Docker, Docker Hub, SSO FAQs, single sign-on, IdP -title: FAQs on SSO and identity providers +title: FAQs for SSO and identity providers +linkTitle: Identity providers tags: [FAQ] aliases: - /single-sign-on/idp-faqs/ @@ -10,11 +11,11 @@ aliases: ### Is it possible to use more than one IdP with Docker SSO? -No. You can only configure Docker SSO to work with a single IdP. A domain can only be associated with a single IdP. Docker supports Entra ID (formerly Azure AD) and identity providers that support SAML 2.0. +Yes. Docker supports multiple IdP configurations. A domain can be associated with multiple IdPs. Docker supports Entra ID (formerly Azure AD) and identity providers that support SAML 2.0. ### Is it possible to change my identity provider after configuring SSO? -Yes. You must delete your existing IdP configuration in your Docker SSO connection and then [configure SSO using your new IdP](/manuals/security/for-admins/single-sign-on/connect.md). If you had already turned on enforcement, you should turn off enforcement before updating the provider SSO connection. +Yes. You must delete your existing IdP configuration in your Docker SSO connection and then [configure SSO using your new IdP](/manuals/enterprise/security/single-sign-on/connect.md). If you had already turned on enforcement, you should turn off enforcement before updating the provider SSO connection. ### What information do I need from my identity provider to configure SSO? @@ -26,7 +27,7 @@ To enable SSO in Docker, you need the following from your IdP: ### What happens if my existing certificate expires? -If your existing certificate has expired, you may need to contact your identity provider to retrieve a new X.509 certificate. Then, you need to update the certificate in the [SSO configuration settings](/security/for-admins/single-sign-on/manage/#manage-sso-connections) in Docker Hub or Docker Admin Console. +If your existing certificate has expired, you may need to contact your identity provider to retrieve a new X.509 certificate. 
Then, you need to update the certificate in the [SSO configuration settings](/manuals/enterprise/security/single-sign-on/manage.md#manage-sso-connections) in Docker Hub or Docker Admin Console. ### What happens if my IdP goes down when SSO is enabled? @@ -56,4 +57,4 @@ Yes, Entra ID (formerly Azure AD) is supported with SSO for Docker Business, bot ### My SSO connection with Entra ID isn't working and I receive an error that the application is misconfigured. How can I troubleshoot this? -Confirm that you've configured the necessary API permissions in Entra ID (formerly Azure AD) for your SSO connection. You need to grant admin consent within your Entra ID (formerly Azure AD) tenant. See [Entra ID (formerly Azure AD) documentation](https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/grant-admin-consent?pivots=portal#grant-admin-consent-in-app-registrations). +Confirm that you've configured the necessary API permissions in Entra ID (formerly Azure AD) for your SSO connection. You need to grant administrator consent within your Entra ID (formerly Azure AD) tenant. See [Entra ID (formerly Azure AD) documentation](https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/grant-admin-consent?pivots=portal#grant-admin-consent-in-app-registrations). diff --git a/content/manuals/security/faqs/single-sign-on/users-faqs.md b/content/manuals/security/faqs/single-sign-on/users-faqs.md index 64a9c62ec45a..580ecf294531 100644 --- a/content/manuals/security/faqs/single-sign-on/users-faqs.md +++ b/content/manuals/security/faqs/single-sign-on/users-faqs.md @@ -1,7 +1,8 @@ --- description: Single sign-on user management FAQs keywords: Docker, Docker Hub, SSO FAQs, single sign-on -title: FAQs on SSO and managing users +title: FAQs for SSO and user management +linkTitle: User management tags: [FAQ] aliases: - /single-sign-on/users-faqs/ @@ -36,7 +37,7 @@ If users attempt to sign in through the CLI, they must authenticate using a pers ### Is it possible to force users of Docker Desktop to authenticate, and/or authenticate using their company’s domain? -Yes. Admins can [force users to authenticate with Docker Desktop](../../for-admins/enforce-sign-in/_index.md) using a registry key, `.plist` file, or `registry.json` file. +Yes. Admins can [force users to authenticate with Docker Desktop](/manuals/enterprise/security/enforce-sign-in/_index.md) using a registry key, `.plist` file, or `registry.json` file. Once SSO enforcement is set up on their Docker Business organization or company on Hub, when the user is forced to authenticate with Docker Desktop, the SSO enforcement will also force users to authenticate through SSO with their IdP (instead of authenticating using their username and password). @@ -51,7 +52,7 @@ Yes, you can convert existing users to an SSO account. To convert users from a n - Each user has created a PAT to replace their passwords to allow them to sign in through Docker CLI. - Confirm that all CI/CD pipelines automation systems have replaced their passwords with PATs. -For detailed prerequisites and instructions on how to enable SSO, see [Configure Single Sign-on](../../../security/for-admins/single-sign-on/configure/_index.md). +For detailed prerequisites and instructions on how to enable SSO, see [Configure Single Sign-on](/manuals/enterprise/security/single-sign-on/configure.md). ### What impact can users expect once we start onboarding them to SSO accounts? 
@@ -61,7 +62,7 @@ When SSO is enabled and enforced, your users just have to sign in using the veri Docker SSO provides Just-in-Time (JIT) provisioning by default, with an option to disable JIT. Users are provisioned when a user authenticates with SSO. If a user leaves the organization, administrators must sign in to Docker and manually [remove the user](../../../admin/organization/members.md#remove-a-member-or-invitee) from the organization. -[SCIM](../../../security/for-admins/provisioning/scim/) is available to provide full synchronization with users and groups. When you auto-provision users with SCIM, the recommended configuration is to disable JIT so that all auto-provisioning is handled by SCIM. +[SCIM](/manuals/enterprise/security/provisioning/scim.md) is available to provide full synchronization with users and groups. When you auto-provision users with SCIM, the recommended configuration is to disable JIT so that all auto-provisioning is handled by SCIM. Additionally, you can use the [Docker Hub API](/reference/api/hub/latest/) to complete this process. @@ -69,9 +70,9 @@ Additionally, you can use the [Docker Hub API](/reference/api/hub/latest/) to co The option to disable JIT is available when you use the Admin Console and enable SCIM. If a user attempts to sign in to Docker using an email address that is a verified domain for your SSO connection, they need to be a member of the organization to access it, or have a pending invitation to the organization. Users who don't meet these criteria will encounter an `Access denied` error, and will need an administrator to invite them to the organization. -See [SSO authentication with JIT provisioning disabled](/security/for-admins/provisioning/just-in-time/#sso-authentication-with-jit-provisioning-disabled). +See [SSO authentication with JIT provisioning disabled](/manuals/enterprise/security/provisioning/just-in-time.md#sso-authentication-with-jit-provisioning-disabled). -To auto-provision users without JIT provisioning, you can use [SCIM](/security/for-admins/provisioning/scim/). +To auto-provision users without JIT provisioning, you can use [SCIM](/manuals/enterprise/security/provisioning/scim.md). ### What's the best way to provision the Docker subscription without SSO? @@ -100,3 +101,7 @@ No, we don't differentiate the two in product. ### Is user information visible in Docker Hub? All Docker accounts have a public profile associated with their namespace. If you don't want user information (for example, full name) to be visible, you can remove those attributes from your SSO and SCIM mappings. Alternatively, you can use a different identifier to replace a user's full name. + +### What happens to existing licensed users when SCIM is enabled? + +Enabling SCIM does not immediately remove or modify existing licensed users in your Docker organization. They retain their current access and roles, but after enabling SCIM, you will manage them in your identity provider (IdP). If SCIM is later disabled, previously SCIM-managed users remain in Docker but are no longer automatically updated or removed based on your IdP. 
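Several of these answers assume that users sign in through the Docker CLI with a personal access token (PAT) in place of a password. A minimal sketch of that flow follows. It assumes the token is stored in a hypothetical `DOCKER_PAT` environment variable, and `<YOUR_DOCKER_ID>` is a placeholder for the username:

```console
$ echo $DOCKER_PAT | docker login --username <YOUR_DOCKER_ID> --password-stdin
```

Using `--password-stdin` keeps the token out of your shell history and process listings.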
\ No newline at end of file diff --git a/content/manuals/security/for-admins/access-tokens.md b/content/manuals/security/for-admins/access-tokens.md deleted file mode 100644 index d7d60e6fa024..000000000000 --- a/content/manuals/security/for-admins/access-tokens.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Organization access tokens -description: Learn how to create and manage organization access tokens - to securely push and pull images programmatically. -keywords: docker hub, security, OAT, organization access token -linkTitle: Organization access tokens -params: - sidebar: - badge: - color: blue - text: Beta ---- - -{{< summary-bar feature_name="OATs" >}} - -> [!WARNING] -> -> Organization access tokens (OATs) are not intended to be used with Docker Desktop, and are incompatible. -> -> OATs are also currently incompatible with the following services: -> -> - Docker Build Cloud -> - Docker Scout -> - Docker REST APIs -> -> If you use Docker Desktop or one of these services, you must use personal access tokens instead. - -An organization access token (OAT) is like a [personal access token -(PAT)](/security/for-developers/access-tokens/), but an OAT is associated with -an organization and not a single user account. Use an OAT instead of a PAT to -let business-critical tasks access Docker Hub repositories without connecting -the token to a single user. You must have a [Docker Team or Business -subscription](/subscription/core-subscription/details/) to use OATs. - -OATs provide the following advantages: - -- You can investigate when the OAT was last used and then disable or delete it - if you find any suspicious activity. -- You can limit what each OAT has access to, which limits the impact if an OAT - is compromised. -- All organization owners can manage OATs. If one owner leaves the organization, - the remaining owners can still manage the OATs. -- OATs have their own Docker Hub usage limits that don't count towards your - personal account's limits. - -If you have existing [service accounts](/docker-hub/service-accounts/), Docker recommends that you replace the service accounts with OATs. OATs offer the following advantages over service accounts: - -- Access permissions are easier to manage with OATs. You can assign access - permissions to OATs, while service accounts require using teams for access - permissions. -- OATs are easier to manage. OATs are centrally managed in the Admin Console. - For service accounts, you may need to sign in to that service account to - manage it. If using single sign-on enforcement and the service account is not - in your IdP, you may not be able to sign in to the service account to manage - it. -- OATs are not associated with a single user. If a user with access to the - service account leaves your organization, you may lose access to the service - account. OATs can be managed by any organization owner. - -## Create an organization access token - -> [!IMPORTANT] -> -> Treat access tokens like a password and keep them secret. For example, store your tokens securely in a credential manager. - -Organization owners can create up to 10 organization access tokens (OATs) for -organizations with a Team subscription and up to 100 OATs for organizations with -a Business subscription. Expired tokens count towards the total number of -tokens. - -To create an OAT: - -1. Sign in to the [Admin Console](https://app.docker.com/admin). - -2. Select the organization you want to create an access token for. - -3. Under **Security and access**, select **Access tokens**. - -4.
Select **Generate access token**. - -5. Add a label and optional description for your token. Use something that indicates the use case or purpose of the token. - -6. Select the expiration date for the token. - -7. Select the repository access for the token. - - The access permissions are scopes that set restrictions in your repositories. - For example, for Read & Write permissions, an automation pipeline can build - an image and then push it to a repository. However, it can't delete the - repository. You can select one of the following options: - - - **Public repositories (read only)** - - **All repositories**: You can select read access, or read and write access. - - **Select repositories**: You can select up to 50 repositories, and then - select read access, or read and write access for each repository. - -8. Select **Generate token** and then copy the token that appears on the screen - and save it. You won't be able to retrieve the token once you exit the - screen. - -## Use an organization access token - -You can use an organization access token when you sign in using the Docker CLI. - -Sign in from your Docker CLI client with the following command, replacing -`<YOUR_ORG>` with your organization name: - -```console -$ docker login --username <YOUR_ORG> -``` - -When prompted for a password, enter your organization access token instead of a -password. - -## Modify existing tokens - -You can rename, update the description, update the repository access, -deactivate, or delete a token as needed. - -1. Sign in to the [Admin Console](https://app.docker.com/admin). - -2. Select the organization you want to modify an access token for. - -3. Under **Security and access**, select **Access tokens**. - -4. Select the actions menu on the far right of a token row, then select - **Deactivate**, **Edit**, or **Delete** to modify the token. For **Inactive** - tokens, you can only select **Delete**. - -5. If editing a token, select **Save** after specifying your modifications. diff --git a/content/manuals/security/for-admins/domain-audit.md b/content/manuals/security/for-admins/domain-audit.md deleted file mode 100644 index 412a002a52a2..000000000000 --- a/content/manuals/security/for-admins/domain-audit.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: Learn how to audit your domains for uncaptured users. -keywords: domain audit, security, identify users, manage users -title: Domain audit -aliases: -- /docker-hub/domain-audit/ -- /admin/company/settings/domains/ -- /admin/organization/security-settings/domains/ -weight: 50 ---- - -{{< summary-bar feature_name="Domain audit" >}} - -Domain audit identifies uncaptured users in an organization. Uncaptured users are Docker users who have authenticated to Docker using an email address associated with one of your verified domains, but they're not a member of your organization in Docker. You can audit domains on organizations that are part of the Docker Business subscription. To upgrade your existing account to a Docker Business subscription, see [Upgrade your subscription](/subscription/upgrade/). - -Uncaptured users who access Docker Desktop in your environment might pose a security risk because your organization's security settings, like Image Access Management and Registry Access Management, aren't applied to a user's session. In addition, you won't have visibility into the activity of uncaptured users. You can add uncaptured users to your organization to gain visibility into their activity and apply your organization's security settings.
- -Domain audit can't identify the following Docker users in your environment: - -- Users who access Docker Desktop without authenticating -- Users who authenticate using an account that doesn't have an email address associated with one of your verified domains - -Although domain audit can't identify all Docker users in your environment, you can enforce sign-in to prevent unidentifiable users from accessing Docker Desktop in your environment. For more details about enforcing sign-in, see [Configure registry.json to enforce sign-in](../for-admins/enforce-sign-in/_index.md). - -> [!TIP] -> -> You can use endpoint management (MDM) software to identify the number of Docker Desktop instances and their versions within your environment. This can provide accurate license reporting, help ensure your machines use the latest version of Docker Desktop, and enable you to [enforce sign-in](enforce-sign-in/_index.md). -> - [Intune](https://learn.microsoft.com/en-us/mem/intune/apps/app-discovered-apps) -> - [Jamf](https://docs.jamf.com/10.25.0/jamf-pro/administrator-guide/Application_Usage.html) -> - [Kandji](https://support.kandji.io/support/solutions/articles/72000559793-view-a-device-application-list) -> - [Kolide](https://www.kolide.com/features/device-inventory/properties/mac-apps) -> - [Workspace One](https://blogs.vmware.com/euc/2022/11/how-to-use-workspace-one-intelligence-to-manage-app-licenses-and-reduce-costs.html) - -## Prerequisites - -Before you audit your domains, review the following required prerequisites: - -- Your organization must be part of a Docker Business subscription. To upgrade your existing account to a Docker Business subscription, see [Upgrade your subscription](../../subscription/change.md). -- You must [add and verify your domains](./single-sign-on/configure/_index.md#step-one-add-and-verify-your-domain). - -> [!IMPORTANT] -> -> Domain audit is not supported for companies or organizations within a company. - -## Audit your domains for uncaptured users - -{{< tabs >}} -{{< tab name="Docker Hub" >}} - -{{% admin-domain-audit product="hub" %}} - -{{< /tab >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -{{% admin-domain-audit product="admin" %}} - -{{< /tab >}} -{{< /tabs >}} - diff --git a/content/manuals/security/for-admins/hardened-desktop/registry-access-management.md b/content/manuals/security/for-admins/hardened-desktop/registry-access-management.md deleted file mode 100644 index 2c12977816b1..000000000000 --- a/content/manuals/security/for-admins/hardened-desktop/registry-access-management.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Control access to approved registries with Registry Access Management, ensuring secure Docker Desktop usage -keywords: registry, access, management, permissions, Docker Business feature, security, admin -title: Registry Access Management -tags: [admin] -aliases: - - /desktop/hardened-desktop/registry-access-management/ - - /admin/organization/registry-access/ - - /docker-hub/registry-access-management/ - - /security/for-admins/registry-access-management/ -weight: 30 ---- - -{{< summary-bar feature_name="Registry access management" >}} - -With Registry Access Management (RAM), administrators can ensure that their developers using Docker Desktop only access allowed registries. This is done through the Registry Access Management dashboard in Docker Hub or the Docker Admin Console. - -Registry Access Management supports both cloud and on-prem registries. 
This feature operates at the DNS level and therefore is compatible with all registries. You can add any hostname or domain name you’d like to include in the list of allowed registries. However, if the registry redirects to other domains such as `s3.amazon.com`, then you must add those domains to the list. - -Example registries administrators can allow include: - - - Docker Hub. This is enabled by default. - - Amazon ECR - - GitHub Container Registry - - Google Container Registry - - GitLab Container Registry - - Nexus - - Artifactory - -## Prerequisites - -You need to [enforce sign-in](../enforce-sign-in/_index.md). For Registry Access -Management to take effect, Docker Desktop users must authenticate to your -organization. The feature takes effect whenever a developer authenticates to -your organization, but without enforcement a developer can skip signing in -entirely. Enforcing sign-in guarantees that the feature always takes effect. - -## Configure Registry Access Management permissions - -{{< tabs >}} -{{< tab name="Docker Hub" >}} - -{{% admin-registry-access product="hub" %}} - -{{< /tab >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -{{% admin-registry-access product="admin" %}} - -{{< /tab >}} -{{< /tabs >}} - -## Verify the restrictions - -The new Registry Access Management policy takes effect after the developer successfully authenticates to Docker Desktop using their organization credentials. If a developer attempts to pull an image from a disallowed registry via the Docker CLI, they receive an error message that the organization has disallowed this registry. - -## Caveats - -There are certain limitations when using Registry Access Management: - -- Windows image pulls and image builds are not restricted by default. For Registry Access Management to take effect on Windows Container mode, you must allow the Windows Docker daemon to use Docker Desktop's internal proxy by selecting the [Use proxy for Windows Docker daemon](/manuals/desktop/settings-and-maintenance/settings.md#proxies) setting. -- Builds such as `docker buildx` using a Kubernetes driver are not restricted -- Builds such as `docker buildx` using a custom docker-container driver are not restricted -- Blocking is DNS-based; you must use a registry's access control mechanisms to distinguish between “push” and “pull” -- WSL 2 requires at least a 5.4 series Linux kernel (this does not apply to earlier Linux kernel series) -- Under the WSL 2 network, traffic from all Linux distributions is restricted (this will be resolved in the updated 5.15 series Linux kernel) -- Images pulled by Docker Desktop when Docker Debug or Kubernetes is enabled are not restricted by default even if Docker Hub is blocked by RAM. - -Also, Registry Access Management operates on the level of hosts, not IP addresses. Developers can bypass this restriction within their domain resolution, for example by running Docker against a local proxy or modifying their operating system's `hosts` file. Blocking these forms of manipulation is outside the remit of Docker Desktop.
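To see what the restriction looks like for a developer, attempt a pull from a registry that isn't on the allowed list. The following is a hypothetical sketch, where `registry.example.com` is a placeholder and the exact error text varies:

```console
$ docker pull registry.example.com/myteam/myimage:latest
Error response from daemon: <message indicating your organization has disallowed this registry>
```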
- -## More resources - -- [Video: Hardened Desktop Registry Access Management](https://www.youtube.com/watch?v=l9Z6WJdJC9A) diff --git a/content/manuals/security/for-admins/hardened-desktop/settings-management/_index.md b/content/manuals/security/for-admins/hardened-desktop/settings-management/_index.md deleted file mode 100644 index f0524e819138..000000000000 --- a/content/manuals/security/for-admins/hardened-desktop/settings-management/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -description: Understand how Settings Management works, who it is for, and what the - benefits are -keywords: Settings Management, rootless, docker desktop, hardened desktop -tags: [admin] -title: What is Settings Management? -linkTitle: Settings Management -aliases: - - /desktop/hardened-desktop/settings-management/ -weight: 10 ---- - -{{< summary-bar feature_name="Hardened Docker Desktop" >}} - -Settings Management helps you control key Docker Desktop settings, like proxies and network configurations, on your developers' machines within your organization. - -For an extra layer of security, you can also use Settings Management to enable and lock in [Enhanced Container Isolation](../enhanced-container-isolation/_index.md), which prevents containers from modifying any Settings Management configurations. - -## Who is it for? - -- For organizations that want to configure Docker Desktop to be within their organization's centralized control. - -For organizations that want to create a standardized Docker Desktop environment at scale. - -For Docker Business customers who want to confidently manage their use of Docker Desktop within tightly regulated environments. - -## How does it work? - -You can configure several Docker Desktop settings using either: - - An `admin-settings.json` file. This file is located on the Docker Desktop host and can only be accessed by developers with root or administrator privileges. - - A settings policy created in the Docker Admin Console - -Settings that are defined by an administrator override any previous values set by developers and ensure that these cannot be modified. - -## What features can I configure with Settings Management? - -Using the `admin-settings.json` file, you can: - -- Turn on and lock in [Enhanced Container Isolation](../enhanced-container-isolation/_index.md) -- Configure HTTP proxies -- Configure network settings -- Configure Kubernetes settings -- Enforce the use of WSL 2 based engine or Hyper-V -- Enforce the use of Rosetta for x86_64/amd64 emulation on Apple Silicon -- Configure Docker Engine -- Turn off Docker Desktop's ability to check for updates -- Turn off Docker Extensions -- Turn off Docker Scout SBOM indexing -- Turn off beta and experimental features -- Turn off Docker AI ([Ask Gordon](../../../../desktop/features/gordon.md)) -- Turn off Docker Desktop's onboarding survey -- Control whether developers can use the Docker terminal -- Control the file sharing implementation for your developers on macOS -- Specify which paths your developers can add file shares to -- Configure Air-gapped containers - -For more details on the syntax and options, see [Configure Settings Management](configure-json-file.md). - -## How do I set up and enforce Settings Management? - -You first need to [enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md) to ensure that all Docker Desktop developers authenticate with your organization.
Since the Settings Management feature requires a Docker Business subscription, enforced sign-in guarantees that only authenticated users have access and that the feature consistently takes effect across all users. The feature can still apply to users who sign in voluntarily, but enforcing sign-in guarantees that it takes effect for everyone. - -Next, you must either: - - Manually [create and configure the `admin-settings.json` file](configure-json-file.md), or use the `--admin-settings` installer flag on [macOS](/manuals/desktop/setup/install/mac-install.md#install-from-the-command-line) or [Windows](/manuals/desktop/setup/install/windows-install.md#install-from-the-command-line) to automatically create the `admin-settings.json` and save it in the correct location. - - Fill out the **Settings policy** creation form in the [Docker Admin Console](configure-admin-console.md). - -Once this is done, Docker Desktop developers receive the changed settings when they either: -- Quit, re-launch, and sign in to Docker Desktop -- Launch and sign in to Docker Desktop for the first time - -To avoid disrupting your developers' workflows, Docker doesn't automatically require that developers re-launch and re-authenticate once a change has been made. - -## What do developers see when the settings are enforced? - -Enforced settings appear grayed out in Docker Desktop. They can't be edited via the Docker Desktop Dashboard, CLI, or `settings-store.json` (or `settings.json` for Docker Desktop 4.34 and earlier). - -In addition, if Enhanced Container Isolation is enforced, developers can't use privileged containers or similar techniques to modify enforced settings within the Docker Desktop Linux VM. For example, they can't reconfigure proxy and networking, or Docker Engine. - -![Proxy settings grayed out](/assets/images/grayed-setting.png) - -## What's next? - -- [Configure Settings Management with a `.json` file](configure-json-file.md) - [Configure Settings Management with the Docker Admin Console](configure-admin-console.md) diff --git a/content/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md b/content/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md deleted file mode 100644 index 1c6f44d371a1..000000000000 --- a/content/manuals/security/for-admins/hardened-desktop/settings-management/configure-admin-console.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: How to configure Settings Management for Docker Desktop using the Docker Admin Console -keywords: admin, controls, rootless, enhanced container isolation -title: Configure Settings Management with the Admin Console -linkTitle: Use the Admin Console -weight: 20 -params: - sidebar: - badge: - color: violet - text: EA ---- - -{{< summary-bar feature_name="Admin Console" >}} - -This page contains information for administrators on how to configure Settings Management with the Docker Admin Console. You can specify and lock configuration parameters to create a standardized Docker Desktop environment across your Docker company or organization. - -## Prerequisites - -- [Download and install Docker Desktop 4.36.0 or later](/manuals/desktop/release-notes.md). -- [Verify your domain](/manuals/security/for-admins/single-sign-on/configure.md#step-one-add-and-verify-your-domain). -- [Enforce sign-in](/manuals/security/for-admins/enforce-sign-in/_index.md). The Settings Management feature requires a Docker Business -subscription. Therefore, your Docker Desktop users must authenticate to your -organization for configurations to take effect.
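For orientation, the following is a minimal sketch of an `admin-settings.json` file that turns on and locks Enhanced Container Isolation. It assumes the `value` and `locked` schema described in [Configure Settings Management](configure-json-file.md). A real file typically sets many more keys:

```json
{
  "configurationFileVersion": 2,
  "enhancedContainerIsolation": {
    "value": true,
    "locked": true
  }
}
```

Settings with `"locked": true` appear grayed out in Docker Desktop, while settings with `"locked": false` only supply defaults that developers can still change.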
- -## Create a settings policy - -1. Within the [Docker Admin Console](https://admin.docker.com/), navigate to the company or organization you want to define a settings policy for. -2. Under the **Security and access** section, select **Desktop Settings Management**. -3. In the top-right corner, select **Create a settings policy**. -4. Give your settings policy a name and an optional description. - - > [!TIP] - > - > If you have already configured Settings Management with an `admin-settings.json` file for an organization, you can upload it using the **Upload existing settings** button, which then automatically populates the form for you. - > - > Settings policies deployed via the Docker Admin Console take precedence over manually deployed `admin-settings.json` files. - -5. Assign the settings policy to all your users within the company or organization, or to specific users. - - > [!NOTE] - > - > If a settings policy is assigned to all users, it sets the policy as the global default policy. You can only have one global settings policy at a time. - > If a user already has a user-specific settings policy assigned, the user-specific policy takes precedence over a global policy. - - > [!TIP] - > - > Before setting a global settings policy, Docker recommends that you first test it as a user-specific policy to make sure you're happy with the changes. - -6. Configure the settings for the policy. Go through each setting and select your chosen setting state. You can choose: - - **User-defined**. Your developers are able to control and change this setting. - - **Always enabled**. This means the setting is turned on and your users won't be able to edit this setting from Docker Desktop or the CLI. - - **Enabled**. The setting is turned on and users can edit this setting from Docker Desktop or the CLI. - - **Always disabled**. This means the setting is turned off and your users won't be able to edit this setting from Docker Desktop or the CLI. - - **Disabled**. The setting is turned off and users can edit this setting from Docker Desktop or the CLI. -7. Select **Create**. - -For the settings policy to take effect: -- On a new install, users need to launch Docker Desktop and authenticate to their organization. -- On an existing install, users need to quit Docker Desktop through the Docker menu, and then re-launch Docker Desktop. If they are already signed in, they don't need to sign in again for the changes to take effect. - - > [!IMPORTANT] - > - > Selecting **Restart** from the Docker menu isn't enough as it only restarts some components of Docker Desktop. - -To avoid disrupting your users' workflows, Docker doesn't automatically require that users re-launch once a change has been made. - -> [!NOTE] -> -> Settings are synced to Docker Desktop and the CLI when a user is signed in and starts Docker Desktop, and then every 60 minutes. - -If your settings policy needs to be rolled back, either delete the policy or edit the policy to set individual settings to **User-defined**. - -## Settings policy actions - -From the **Actions** menu on the **Desktop Settings Management** page in the Docker Admin Console, you can: -- Edit or delete an existing settings policy. -- Export a settings policy as an `admin-settings.json` file. -- Promote a policy that is applied to a select group of users to be the new global default policy for all users.
\ No newline at end of file diff --git a/content/manuals/security/for-admins/provisioning/scim.md b/content/manuals/security/for-admins/provisioning/scim.md deleted file mode 100644 index 2a0b57b834f3..000000000000 --- a/content/manuals/security/for-admins/provisioning/scim.md +++ /dev/null @@ -1,267 +0,0 @@ ---- -keywords: SCIM, SSO, user provisioning, de-provisioning, role mapping, assign users -title: SCIM provisioning -linkTitle: SCIM -description: Learn how System for Cross-domain Identity Management works and how to set it up. -aliases: - - /security/for-admins/scim/ - - /docker-hub/scim/ -weight: 30 ---- - -{{< summary-bar feature_name="SSO" >}} - -System for Cross-domain Identity Management (SCIM) is available for Docker Business customers. This guide provides an overview of SCIM provisioning. - -## How SCIM works - -SCIM offers automated user provisioning and de-provisioning for Docker through your identity provider (IdP). Once SCIM is enabled, users assigned to the Docker application in your IdP are automatically provisioned and added to your Docker organization. If a user is unassigned, they are removed from Docker. - -SCIM also syncs user profile updates, such as name changes, made in your IdP. SCIM can be used with Docker’s default Just-in-Time (JIT) provisioning configuration, or on its own with JIT disabled. - -SCIM supports the automation of: -- Creating users -- Updating user profiles -- Removing and deactivating users -- Re-activating users -- Group mapping - -## Supported attributes - -> [!IMPORTANT] -> -> Docker uses JIT provisioning by default for SSO configurations. If you enable SCIM, JIT values still overwrite the attribute -values set by SCIM provisioning. To avoid conflicts, your JIT attribute values must match your SCIM attribute values. To avoid conflicts between SCIM and JIT, you can also disable JIT provisioning. See [Just-in-Time](/manuals/security/for-admins/provisioning/just-in-time.md) for more information. - -Attributes are pieces of user information, such as name and email, that are synchronized between your IdP and Docker when using SCIM. Proper mapping of these attributes is essential for seamless user provisioning and to prevent duplicate entries when using SSO. - -The following table lists the supported attributes for SCIM: - -| Attribute | Description | -|:---------------------------------------------------------------|:-------------------------------------------------------------------------------------------| -| userName | User’s primary email address, used as the unique identifier | -| name.givenName | User’s first name | -| name.familyName | User’s surname | -| active | Indicates if a user is enabled or disabled, set to “false” to de-provision a user | - -For additional details about supported attributes and SCIM, see [Docker Hub API SCIM reference](/reference/api/hub/latest/#tag/scim). - -## Enable SCIM in Docker - -You must [configure SSO](../single-sign-on/configure/_index.md) before you enable SCIM. Enforcing SSO isn't required to use SCIM. - -{{< tabs >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -{{% admin-scim product="admin" %}} - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -{{% admin-scim %}} - -{{< /tab >}} -{{< /tabs >}} - -## Enable SCIM in your IdP - -The user interface for your IdP may differ slightly from the following steps. You can refer to the documentation for your IdP to verify. 
For additional details, see the documentation for your IdP: - -- [Okta](https://help.okta.com/en-us/Content/Topics/Apps/Apps_App_Integration_Wizard_SCIM.htm) -- [Entra ID (formerly Azure AD)](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/user-provisioning) - -{{< tabs >}} -{{< tab name="Okta" >}} - -### Enable SCIM - -1. Sign in to Okta and select **Admin** to open the admin portal. -2. Open the application you created when you configured your SSO connection. -3. On the application page, select the **General** tab, then **Edit App Settings**. -4. Enable SCIM provisioning, then select **Save**. -5. Now you can access the **Provisioning** tab in Okta. Navigate to this tab, then select **Edit SCIM Connection**. -6. To configure SCIM in Okta, set up your connection using the following values and settings: - - SCIM Base URL: SCIM connector base URL (copied from Docker Hub) - - Unique identifier field for users: `email` - - Supported provisioning actions: **Push New Users** and **Push Profile Updates** - - Authentication Mode: HTTP Header - - SCIM Bearer Token: HTTP Header Authorization Bearer Token (copied from Docker Hub) -7. Select **Test Connector Configuration**. -8. Review the test results and select **Save**. - -### Enable synchronization - -1. In Okta, select **Provisioning**. -2. Select **To App**, then **Edit**. -3. Enable **Create Users**, **Update User Attributes**, and **Deactivate Users**. -4. Select **Save**. -5. Remove unnecessary mappings. The necessary mappings are: - - Username - - Given name - - Family name - - Email - -{{< /tab >}} -{{< tab name="Entra ID SAML 2.0" >}} - -1. In the Azure admin portal, go to **Enterprise Applications**, then select the **Docker** application you created when you set up your SSO connection. -2. Select **Provisioning**, then **Get Started**. -3. Select **Automatic** provisioning mode. -4. Enter the **SCIM Base URL** and **API Token** from Docker into the **Admin Credentials** form. -5. Test the connection, then select **Save**. -6. Go to **Mappings**, then select **Provision Azure Active Directory Groups**. -7. Set the **Enabled** value to **No**. -8. Select **Provision Azure Active Directory Users**. -9. Remove all unsupported attributes. -10. Select **Save**. -11. Set the provisioning status to **On**. - -{{< /tab >}} -{{< /tabs >}} - -## Set up role mapping - -You can assign [roles](/security/for-admins/roles-and-permissions/) to members in your organization in your IdP. To set up a role, you can use optional user-level attributes for the person you want to assign a role to. In addition to roles, you can set an organization or team to override the default provisioning values set by the SSO connection. - -> [!NOTE] -> -> Role mappings are supported for both SCIM and JIT provisioning. With JIT provisioning, role mapping only applies when a user is initially provisioned to the organization. - -The following table lists the supported optional user-level attributes. - -| Attribute | Possible values | Considerations | -| --------- | ------------------ | -------------- | -| `dockerRole` | `member`, `editor`, or `owner`, for a list of permissions for each role, see [Roles and permissions](/security/for-admins/roles-and-permissions/) | If you don't assign a role in the IdP, the value of the `dockerRole` attribute defaults to `member`. When you set the attribute, this overrides the default value.
| -| `dockerOrg` | `organizationName`, for example, an organization named "moby" would be `moby` | Setting this attribute overrides the default organization configured by the SSO connection. Also, this won't add the user to the default team. If this attribute isn't set, the user is provisioned to the default organization and the default team. If set and `dockerTeam` is also set, this provisions the user to the team within that organization. | -| `dockerTeam` | `teamName`, for example, a team named "developers" would be `developers` | Setting this attribute provisions the user to the default organization and to the specified team, instead of the SSO connection's default team. This also creates the team if it doesn't exist. You can still use group mapping to provision users to teams in multiple organizations. See [Group mapping](/security/for-admins/provisioning/group-mapping/) for more details. | - -After you set the role in the IdP, you must initiate a sync in your IdP to push the changes to Docker. - -The external namespace to use to set up these attributes is `urn:ietf:params:scim:schemas:extension:docker:2.0:User`. - -{{< tabs >}} -{{< tab name="Okta" >}} - -### Set up role mapping in Okta - -1. Set up [SSO](../single-sign-on/configure/_index.md) and SCIM first. -2. In the Okta admin portal, go to **Directory**, select **Profile Editor**, and then **User (Default)**. -3. Select **Add Attribute** and configure the values for the role, organization, or team you want to add. Exact naming isn't required. -4. Return to the **Profile Editor** and select your application. -5. Select **Add Attribute** and enter the required values. The **External Name** and **External Namespace** must be exact. The external name values for organization, team, and role mapping are `dockerOrg`, `dockerTeam`, and `dockerRole` respectively, as listed in the previous table. The external namespace is the same for all of them: `urn:ietf:params:scim:schemas:extension:docker:2.0:User`. -6. After creating the attributes, navigate to the top of the page and select **Mappings**, then **Okta User to YOUR APP**. -7. Go to the newly created attributes and map the variable names to the external names, then select **Save Mappings**. If you’re using JIT provisioning, continue to the following steps. -8. Navigate to **Applications** and select **YOUR APP**. -9. Select **General**, then **SAML Settings**, and **Edit**. -10. Select **Step 2** and configure the mapping from the user attribute to the Docker variables. - -### Assign roles by user - -1. In the Okta admin portal, select **Directory**, then **People**. -2. Select **Profile**, then **Edit**. -3. Select **Attributes** and update the attributes to the desired values. - -### Assign roles by group - -1. In the Okta admin portal, select **Directory**, then **People**. -2. Select **YOUR GROUP**, then **Applications**. -3. Open **YOUR APPLICATION** and select the **Edit** icon. -4. Update the attributes to the desired values. - -If a user doesn't already have attributes set up, users who are added to the group will inherit these attributes upon provisioning. - -{{< /tab >}} -{{< tab name="Entra ID SAML 2.0" >}} - -### Set up role mapping in Azure AD - -1. Set up [SSO](../single-sign-on/configure/_index.md) and SCIM first. -2. In the Azure AD admin portal, open **Enterprise Apps** and select **YOUR APP**. -3. Select **Provisioning**, then **Mappings**, and **Provision Azure Active Directory Users**. -4.
To set up the new mapping, check **Show advanced options**, then select **Edit attribute options**. -5. Create new entries with the desired mapping for role, organization, or group (for example, `urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerRole`) as a string type. -6. Navigate back to **Attribute Mapping** for users and select **Add new mapping**. - -### Expression mapping - -This implementation works best for roles, but can't be used along with organization and team mapping using the same method. With this approach, you can assign attributes at a group level, which members can inherit. This is the recommended approach for role mapping. - -1. In the **Edit Attribute** view, select the **Expression** mapping type. -2. If you can create app roles named as the role directly (for example, `owner` or `editor`), in the **Expression** field, you can use `SingleAppRoleAssignment([appRoleAssignments])`. - - Alternatively, if you’re restricted to using app roles you have already defined (for example, `My Corp Administrators`), you’ll need to set up a switch for these roles. For example: - - ```text - Switch(SingleAppRoleAssignment([appRoleAssignments]), "member", "My Corp Administrator", "owner", "My Corp Editor", "editor") - ``` -3. Set the following fields: - - **Target attribute**: `urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerRole` - - **Match objects using this attribute**: No - - **Apply this mapping**: Always -4. Save your configuration. - -### Direct mapping - -Direct mapping is an alternative to expression mapping. This implementation works for all three mapping types at the same time. To assign users, you need to use the Microsoft Graph API. - -1. In the **Edit Attribute** view, select the **Direct** mapping type. -2. Set the following fields: - - **Source attribute**: choose one of the allowed extension attributes in Entra (for example, `extensionAttribute1`) - - **Target attribute**: `urn:ietf:params:scim:schemas:extension:docker:2.0:User:dockerRole` - - **Match objects using this attribute**: No - - **Apply this mapping**: Always - - If you're setting more than one attribute, for example role and organization, you need to choose a different extension attribute for each one. -3. Save your configuration. - -### Assign users - -If you used expression mapping in the previous step, navigate to **App registrations**, select **YOUR APP**, and **App Roles**. Create an app role for each Docker role. If possible, create it with a display name that is directly equivalent to the role in Docker, for example, `owner` instead of `Owner`. If set up this way, you can use the expression mapping `SingleAppRoleAssignment([appRoleAssignments])`. Otherwise, you must use a custom switch. See [Expression mapping](#expression-mapping). - -To add a user: -1. Select **YOUR APP**, then **Users and groups**. -2. Select **Add user/groups**, select the user you want to add, then **Select** their desired role. - -To add a group: -1. Select **YOUR APP**, then **Users and groups**. -2. Select **Add user/groups**, select the group you want to add, then **Select** its desired role. - -If you used direct mapping in the previous step, go to **Microsoft Graph Explorer** and sign in to your tenant. You need to be a tenant admin to use this feature. Use the Microsoft Graph API to assign the extension attribute to the user with the value that corresponds to what the attribute was mapped to.
See the [Microsoft Graph API documentation](https://learn.microsoft.com/en-us/graph/extensibility-overview?tabs=http) on adding or updating data in extension attributes. - -{{< /tab >}} -{{< /tabs >}} - -See the documentation for your IdP for additional details: - -- [Okta](https://help.okta.com/en-us/Content/Topics/users-groups-profiles/usgp-add-custom-user-attributes.htm) -- [Entra ID (formerly Azure AD)](https://learn.microsoft.com/en-us/azure/active-directory/app-provisioning/customize-application-attributes#provisioning-a-custom-extension-attribute-to-a-scim-compliant-application) - -## Disable SCIM - -If SCIM is disabled, any user provisioned through SCIM will remain in the organization. Future changes for your users will not sync from your IdP. User de-provisioning is only possible when manually removing the user from the organization. - -{{< tabs >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -{{% admin-scim-disable product="admin" %}} - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -{{% admin-scim-disable %}} - -{{< /tab >}} -{{< /tabs >}} - -## More resources - -The following videos demonstrate how to configure SCIM for your IdP: - -- [Video: Configure SCIM with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=1314) -- [Video: Attribute mapping with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=1998) -- [Video: Configure SCIM with Entra ID (Azure)](https://youtu.be/bGquA8qR9jU?feature=shared&t=1668) -- [Video: Attribute and group mapping with Entra ID (Azure)](https://youtu.be/bGquA8qR9jU?feature=shared&t=2039) diff --git a/content/manuals/security/for-admins/single-sign-on/configure.md b/content/manuals/security/for-admins/single-sign-on/configure.md deleted file mode 100644 index 7a42d24c4269..000000000000 --- a/content/manuals/security/for-admins/single-sign-on/configure.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: Learn how to configure single sign-on for your organization or company. -keywords: configure, sso, docker hub, hub, docker admin, admin, security -title: Configure single sign-on -linkTitle: Configure -aliases: -- /docker-hub/domains/ -- /docker-hub/sso-connection/ -- /docker-hub/enforcing-sso/ -- /single-sign-on/configure/ -- /admin/company/settings/sso-configuration/ -- /admin/organization/security-settings/sso-configuration/ ---- - -{{< summary-bar feature_name="SSO" >}} - -Get started creating a single sign-on (SSO) connection for your organization or company. This guide walks through the steps to add and verify the domains your members use to sign in to Docker. - -## Step one: Add your domain - -{{< tabs >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -1. Sign in to the [Admin Console](https://admin.docker.com/). -2. Select your organization or company from the **Choose profile** page. Note that when an organization is part of a company, you must select the company and configure the domain for the organization at the company level. -3. Under **Security and access**, select **Domain management**. -4. Select **Add a domain**. -5. Enter your domain in the text box and select **Add domain**. -6. The pop-up modal will prompt you with steps to verify your domain. Copy the **TXT Record Value**. - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -1. Sign in to [Docker Hub](https://hub.docker.com/). -2. Select **Organizations** and then your organization from the list. -3. On your organization page, select **Settings** and then **Security**. -4. Select **Add a domain**. -5. 
Enter your domain in the text box and select **Add domain**. -6. The pop-up modal prompts you with steps to verify your domain. Copy the **TXT Record Value**. - -{{< /tab >}} -{{< /tabs >}} - -## Step two: Verify your domain - -Verifying your domain ensures Docker knows you own it. To verify, add your Docker-provided TXT Record Value as a new TXT record at your domain host. The record proves your ownership. It can take up to 72 hours for DNS to recognize the change. When the change is reflected in DNS, Docker automatically checks the record to confirm your ownership. - -{{< tabs >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -1. Navigate to your domain host, create a new TXT record, and paste the **TXT Record Value** from Docker. -2. TXT record verification can take 72 hours. Once you have waited for TXT record verification, return to the **Domain management** page of the Admin Console and select **Verify** next to your domain name. - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -1. Navigate to your domain host, create a new TXT record, and paste the **TXT Record Value** from Docker. -2. TXT record verification can take 72 hours. Once you have waited for TXT record verification, return to the **Security** page of Docker Hub and select **Verify** next to your domain name. - -{{< /tab >}} -{{< /tabs >}} - -Once you have added and verified your domain, you are ready to create an SSO connection between Docker and your identity provider (IdP). - -## More resources - -The following videos walk through verifying your domain to create your SSO connection in Docker. - -- [Video: Verify your domain for SSO with Okta](https://youtu.be/c56YECO4YP4?feature=shared&t=529) -- [Video: Verify your domain for SSO with Azure AD (OIDC)](https://youtu.be/bGquA8qR9jU?feature=shared&t=496) - -## What's next? - -[Connect Docker and your IdP](../single-sign-on/connect.md). - diff --git a/content/manuals/security/for-admins/single-sign-on/connect.md b/content/manuals/security/for-admins/single-sign-on/connect.md deleted file mode 100644 index 3ac1bd14db7b..000000000000 --- a/content/manuals/security/for-admins/single-sign-on/connect.md +++ /dev/null @@ -1,225 +0,0 @@ ---- -description: Learn how to complete your single-sign on connection and next steps for enabling SSO. -keywords: configure, sso, docker hub, hub, docker admin, admin, security -title: Create an SSO connection -linkTitle: Connect ---- - -{{< summary-bar feature_name="SSO" >}} - -Creating a single sign-on (SSO) connection requires setting up the connection in Docker first, followed by setting up the connection in your identity provider (IdP). This guide provides steps for setting up your SSO connection in Docker and your IdP. - -> [!TIP] -> -> This guide requires copying and pasting values in both Docker and your IdP. To ensure a seamless connection process, complete all the steps in this guide in one session and keep separate browsers open for both Docker and your IdP. - -## Prerequisites - -Make sure you have completed the following before you begin: - -- Your domain is verified -- You have an account set up with an IdP -- You have completed the steps in the [Configure single sign-on](../single-sign-on/configure.md) guide - -## Step one: Create an SSO connection in Docker - ->[!NOTE] -> -> Before creating an SSO connection in Docker, you must verify at least one domain.
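Before you continue, you can confirm that your domain's verification TXT record has propagated by querying DNS directly. A quick sketch using standard DNS tooling, with `example.com` standing in for your verified domain:

```console
$ dig +short TXT example.com
```

The Docker-provided **TXT Record Value** should appear in the output. If it doesn't, wait for DNS propagation before you create the connection.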
- -{{< tabs >}} -{{< tab name="Admin Console" >}} - -{{< include "admin-early-access.md" >}} - -1. Sign in to the [Admin Console](https://admin.docker.com/). -2. Select your organization or company from the **Choose profile** page. Note that when an organization is part of a company, you must select the company and configure the domain for the organization at the company level. -3. Under **Security and access**, select **SSO and SCIM**. -4. Select **Create Connection** and provide a name for the connection. -5. Select an authentication method, **SAML** or **Azure AD (OIDC)**. -6. Copy the following fields to add to your IdP: - - Okta SAML: **Entity ID**, **ACS URL** - - Azure OIDC: **Redirect URL** -7. Keep this window open so you can paste the connection information from your IdP here at the end of this guide. - -{{< /tab >}} -{{< tab name="Docker Hub" >}} - -1. Sign in to Docker Hub. -2. Select **Organizations** and then your organization from the list. -3. On your organization page, select **Settings** and then **Security**. -4. In the SSO connection table, select **Create Connection** and provide a name for the connection. -5. Select an authentication method, **SAML** or **Azure AD (OIDC)**. -6. Copy the following fields to add to your IdP: - - Okta SAML: **Entity ID**, **ACS URL** - - Azure OIDC: **Redirect URL** -7. Keep this window open so you can paste the connection information from your IdP here at the end of this guide. - -{{< /tab >}} -{{< /tabs >}} - -## Step two: Create an SSO connection in your IdP - -The user interface for your IdP might differ slightly from the following steps. Refer to the documentation for your IdP to verify. - -{{< tabs >}} -{{< tab name="Okta SAML" >}} - -1. Sign in to your Okta account. -2. Select **Admin** to open the Okta Admin portal. -3. From the left-hand navigation, select **Applications**. -4. Select **Applications** and then **Create App Integration**. -5. Select **SAML 2.0** and then **Next**. -6. Enter "Docker Hub" as your **App Name**. -7. Optional. Upload a logo. -8. Select **Next**. -9. Enter the following values from Docker into their corresponding Okta fields: - - Docker ACS URL: **Single Sign On URL** - - Docker Entity ID: **Audience URI (SP Entity ID)** -10. Configure the following settings in Okta: - - Name ID format: `EmailAddress` - - Application username: `Email` - - Update application on: `Create and update` -11. Select **Next**. -12. Select the **This is an internal app that we have created** checkbox. -13. Select **Finish**. - -{{< /tab >}} -{{< tab name="Entra ID SAML 2.0" >}} - -1. Sign in to your Azure AD admin portal. -2. Select **Default Directory** and then **Add**. -3. Choose **Enterprise Application** and select **Create your own application**. -4. Enter "Docker" for application name and select the **non-gallery** option. -5. After the application is created, go to **Single Sign-On** and select **SAML**. -6. Select **Edit** on the **Basic SAML configuration** section. -7. Enter the following values from Docker into their corresponding Azure fields: - - Docker Entity ID: **Identifier** - - Docker ACS URL: **Reply URL** -8. Save configuration. -9. From the **SAML Signing Certificate** section, download your **Certificate (Base64)**. - -{{< /tab >}} -{{< tab name="Azure Connect (OIDC)" >}} - -To create an Azure Connect (OIDC) connection, you must create an app registration, client secrets, and configure API permissions for Docker: - -### Create app registration - -1. Sign in to your Azure AD admin portal. -2.
Select **App Registration** and then **New Registration**. -3. Enter "Docker Hub SSO" or similar for application name. -4. Under **Supported account types**, specify who can use this application or access the app. -5. In the **Redirect URI** section, select **Web** from the drop-down menu and paste the **Redirect URI** value from the Docker console into this field. -6. Select **Register** to register the app. -7. Copy the **Client ID** from the app's overview page. You need this information to continue configuring SSO in Docker. - -### Create client secrets - -1. Open your app in Azure AD and select **Certificates & secrets**. -2. Select **+ New client secret**. -3. Specify the description of the secret and set how long keys can be used. -4. Select **Add** to continue. -5. Copy the secret **Value** field. You need this to continue configuring SSO in Docker. - -### Configure API permissions - -1. Open your app in Azure AD and navigate to your app settings. -2. Select **API permission** and then **Grant admin consent for [your tenant name]**. -3. Select **Yes** to confirm. -4. After confirming, select **Add a permission** and then **Delegated permissions**. -5. Search for `User.Read` and select this option. -6. Select **Add permissions** to confirm. -7. Verify admin consent was granted for each permission by checking the **Status** column. - -{{< /tab >}} -{{< /tabs >}} - -## Step three: Connect Docker and your IdP - -After creating your connection in Docker and your IdP, you can cross-connect them to complete your SSO connection: - -{{< tabs >}} -{{< tab name="Okta SAML" >}} - -1. Open the app you created in Okta and select **View SAML setup instructions**. -2. Copy the following values from the Okta SAML setup instruction page: - - **SAML Sign-in URL** - - **x509 Certificate** -3. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide. -4. Select **Next** to open the **Update single-sign on connection** page. -5. Paste your Okta **SAML Sign-in URL** and **x509 Certificate** values in Docker. -6. Select **Next**. -7. Optional. Select a default team to provision users to and select **Next**. -8. Verify your SSO connection details and select **Create Connection**. - -{{< /tab >}} -{{< tab name="Entra ID SAML 2.0" >}} - -1. Open your app in Azure AD. -2. Open your downloaded **Certificate (Base64)** in a text editor. -3. Copy the following values: - - From Azure AD: **Login URL** - - The contents of your **Certificate (Base64)** file from your text editor -4. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide. -5. Paste your **Login URL** and **Certificate (Base64)** values in Docker. -6. Select **Next**. -7. Optional. Select a default team to provision users to and select **Next**. -8. Verify your SSO connection details and select **Create Connection**. - -{{< /tab >}} -{{< tab name="Azure Connect (OIDC)" >}} - -1. Open Docker Hub or the Admin Console. Your SSO configuration page should still be open from Step one of this guide. -2. Paste the following values from Azure AD into Docker: - - **Client ID** - - **Client Secret** - - **Azure AD Domain** -3. Select **Next**. -4. Optional. Select a default team to provision users to and select **Next**. -5. Verify your SSO connection details and select **Create Connection**.
- -{{< /tab >}} -{{< /tabs >}} - -## Step four: Test your connection - -After you've completed the SSO connection process in Docker, we recommend testing it: - -1. Open an incognito browser. -2. Sign in to the Admin Console using your **domain email address**. -3. The browser will redirect to your IdP's login page to authenticate. -4. Authenticate through your domain email instead of using your Docker ID. - -You can also test your SSO connection through the command-line interface (CLI). If you want to test through the CLI, your users must have a personal access token (PAT). - - -## Optional: Enforce SSO - ->[!IMPORTANT] -> -> If SSO isn't enforced, users can choose to sign in with either their Docker username and password or SSO. - -Enforcing SSO requires users to use SSO when signing into Docker. This centralizes authentication and enforces policies set by the IdP. - -1. Sign in to the [Admin Console](https://admin.docker.com/). -2. Select your organization or company from the **Choose profile** page. Note that when an organization is part of a company, you must select the company and configure the domain for the organization at the company level. -3. Under Security and access, select **SSO and SCIM**. -4. In the SSO connections table, select the **Action** icon and then **Enable enforcement**. When SSO is enforced, your users are unable to modify their email address and password, convert a user account to an organization, or set up 2FA through Docker Hub. If you want to use 2FA, you must enable 2FA through your IdP. -5. Continue with the on-screen instructions and verify you've completed all tasks. -6. Select **Turn on enforcement** to complete. - -Your users must now sign in to Docker with SSO. - -## More resources - -The following videos demonstrate how to enforce SSO. - -- [Video: Enforce SSO with Okta SAML](https://youtu.be/c56YECO4YP4?feature=shared&t=1072) -- [Video: Enforce SSO with Azure AD (OIDC)](https://youtu.be/bGquA8qR9jU?feature=shared&t=1087) - - -## What's next - -- [Provision users](/manuals/security/for-admins/provisioning/_index.md) -- [Enforce sign-in](../enforce-sign-in/_index.md) diff --git a/content/manuals/security/for-admins/single-sign-on/troubleshoot.md b/content/manuals/security/for-admins/single-sign-on/troubleshoot.md deleted file mode 100644 index e5585e36a8a7..000000000000 --- a/content/manuals/security/for-admins/single-sign-on/troubleshoot.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -description: Learn how to troubleshoot common SSO issues. -keywords: sso, troubleshoot, single sign-on -title: Troubleshoot single sign-on -linkTitle: Troubleshoot -tags: [Troubleshooting] ---- - -While configuring or using single sign-on (SSO), you may encounter issues that -can stem from your identity provider (IdP) or Docker configuration. The -following sections describe how to view the error messages in the Docker Admin -Console as well as some common errors and possible solutions. You can also see -your identity provider's documentation to learn if you can view error logs in -their service. - -## View SSO and SCIM error logs - -1. Sign in to the [Admin Console](https://app.docker.com/admin/). -2. Select your organization or company from the **Choose profile** page, and then select **SSO and SCIM**. - - > [!NOTE] - > - > When an organization is part of a company, you must select the company and - > view the SSO connection for that organization at the company level. - -3. In the SSO connections table, select the **Actions** icon and **View error - logs**. 
The **Connection errors** page appears with a list of errors that - have occurred in the past 7 days. -4. In the **Connection errors** page, select **View error details** next to an - error message for more details. A modal appears with a JSON object containing - more details. - -## Common SSO errors and solutions - -[View the SSO and SCIM error logs](#view-sso-and-scim-error-logs) and then use -the following sections for solutions to common configuration errors. - -### IdP-initiated sign in is not enabled for connection - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -IdP-Initiated sign in is not enabled for connection '$ssoConnection'. -``` - -Docker doesn't support an IdP-initiated SAML flow. This error can occur when a -user attempts to authenticate from the IdP, for example using the Docker SSO App -tile on the dashboard. - -Possible solutions: - - * The user must initiate authentication from Docker apps (Hub, Desktop, etc). - The user needs to enter their email address and they will get redirected to - the configured SSO IdP for their domain. - * (Optional) Configure the Docker SSO App as not visible to users on your IdP - so users don’t attempt to start authentication from there. - -### Not enough seats in organization - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -Not enough seats in organization '$orgName'. Add more seats or contact your administrator. -``` - -This error can occur when attempting to provision a user into the organization -via SSO Just-in-Time provisioning or SCIM, but the organization has no available -seats for the user. - -Possible solutions: - - * Add more Docker Business subscription seats to the organization. For details, - see [Add seats to your subscription](../../../subscription/manage-seats.md). - * Remove some users or pending invitations from your organization to make more - seats available. For more details, see [Manage organization - members](/admin/organization/members/). - -### Domain is not verified for SSO connection - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -Domain '$emailDomain' is not verified for your SSO connection. Contact your company administrator. TraceID: XXXXXXXXXXXXXX -``` - -This error occurs if the IdP authenticated a user through SSO and the UPN -returned to Docker doesn’t match any of the verified domains associated to the -SSO connection configured in Docker. - -Possible solutions: - - * Make sure the IdP SSO connection is returning the correct UPN value as part - of the assertion attributes (attributes mapping). - * Add and verify all domains and subdomains that are used as UPN by your IdP - and associate them to your Docker SSO connection. For more details, see [Configure single sign-on](../single-sign-on/configure.md). - -### Unable to find session - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -We couldn't find your session. You may have pressed the back button, refreshed the page, opened too many sign-in dialogs, or there is some issue with cookies. Try signing in again. If the issue persists, contact your administrator. -``` - -This error typically occurs during the authentication flow when a user presses -the back or the refresh button on the browser. This causes the sign-in flow to -lose track of the initial authentication request, which is required to complete -all authentication flows. 
- -Possible solutions: - - * Avoid pressing the back or refresh buttons during sign in. - * Close the browser’s tab and start the authentication flow from the beginning - in the app (Docker Desktop, Hub, etc.) - -### User is not assigned to the organization - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -User '$username' is not assigned to this SSO organization. Contact your administrator. TraceID: XXXXXXXXXXXXX -``` - -This error occurs if SSO Just-In-Time (JIT) provisioning is disabled. JIT -provisioning ensures that a user is added to an organization after they -authenticate via SSO. JIT provisioning can be optionally disabled to prevent -users taking seats in the organization automatically or when SCIM is used as -the only option for user provisioning. - -Possible solutions: - - * Review your SSO connection configuration and enable JIT provisioning to add - all users to the organization after authenticating via SSO. For more details, - see [Just-in-Time - provisioning](/security/for-admins/provisioning/just-in-time/). - * If JIT provisioning should remain disabled, then add the user to the - organization by manually inviting them. Next time the user authenticates via - SSO they will get added to the org because they are invited. For more - details, see [Manage organization members](/admin/organization/members/). - * If SCIM should provision the user, then ensure that the IdP controlling SCIM - provisioning is properly configured to synchronize users with Docker as soon - as they get assigned to the app. For more details, refer to your identity - provider's documentation. - -### Name ID is not an email address - -An error message, similar to the following, appears in the error logs for this -issue. - -```text -The name ID sent by the identity provider is not an email address. Contact your company administrator. -``` - -This error can occur during SAML authentication, when your IdP sends back a Name -ID (UPN) that doesn't comply with the email address format required. The Docker -SSO app requires a name identifier to be the primary email address of the user. - -Possible solutions: - - * Ensure that the Name ID attribute format is `EmailAddress`. diff --git a/content/manuals/security/for-developers/_index.md b/content/manuals/security/for-developers/_index.md deleted file mode 100644 index 72aff1827fac..000000000000 --- a/content/manuals/security/for-developers/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -build: - render: never -title: For developers -weight: 20 ---- diff --git a/content/manuals/security/security-announcements.md b/content/manuals/security/security-announcements.md index bc1bb163d827..1f97a033ff2c 100644 --- a/content/manuals/security/security-announcements.md +++ b/content/manuals/security/security-announcements.md @@ -7,29 +7,59 @@ toc_min: 1 toc_max: 2 --- +## Docker Desktop 4.43.0 Security Update: CVE-2025-6587 + +_Last updated July 03, 2025_ + +A vulnerability in Docker Desktop was fixed on July 03 in the [4.43.0](/manuals/desktop/release-notes.md#4430) release: + +- Fixed [CVE-2025-6587](https://www.cve.org/CVERecord?id=CVE-2025-6587) where sensitive system environment variables were included in Docker Desktop diagnostic logs, allowing for potential secret exposure. 
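+
+After updating, you can confirm which Docker Desktop version you're running from the command line. This is a minimal sketch: it assumes the Docker CLI's active context points at Docker Desktop's engine, and the exact output string might vary by platform and version:
+
+```console
+$ docker version --format '{{.Server.Platform.Name}}'
+```
+
+On Docker Desktop, the output includes the product name and version, for example `Docker Desktop 4.43.0`.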
+
+## Docker Desktop 4.41.0 Security Update: CVE-2025-3224, CVE-2025-4095, and CVE-2025-3911
+
+_Last updated May 15, 2025_
+
+Three vulnerabilities in Docker Desktop were fixed on April 28 in the [4.41.0](/manuals/desktop/release-notes.md#4410) release.
+
+- Fixed [CVE-2025-3224](https://www.cve.org/CVERecord?id=CVE-2025-3224) allowing an attacker with access to a user machine to perform an elevation of privilege when Docker Desktop updates.
+- Fixed [CVE-2025-4095](https://www.cve.org/CVERecord?id=CVE-2025-4095) where Registry Access Management (RAM) policies were not enforced when using a macOS configuration profile, allowing users to pull images from unapproved registries.
+- Fixed [CVE-2025-3911](https://www.cve.org/CVERecord?id=CVE-2025-3911) allowing an attacker with read access to a user's machine to obtain sensitive information from Docker Desktop log files, including environment variables configured for running containers.
+
+We strongly encourage you to update to Docker Desktop [4.41.0](/manuals/desktop/release-notes.md#4410).
+
## Docker Desktop 4.34.2 Security Update: CVE-2024-8695 and CVE-2024-8696

_Last updated September 13, 2024_

-Two remote code execution (RCE) vulnerabilities in Docker Desktop related to Docker Extensions were reported by [Cure53](https://cure53.de/) and were fixed on September 12 in the [4.34.2](https://docs.docker.com/desktop/release-notes/#4342) release.
+Two remote code execution (RCE) vulnerabilities in Docker Desktop related to Docker Extensions were reported by [Cure53](https://cure53.de/) and were fixed on September 12 in the [4.34.2](/manuals/desktop/release-notes.md#4342) release.

- [CVE-2024-8695](https://www.cve.org/cverecord?id=CVE-2024-8695): A remote code execution (RCE) vulnerability via crafted extension description/changelog could be abused by a malicious extension in Docker Desktop before 4.34.2. [Critical]
- [CVE-2024-8696](https://www.cve.org/cverecord?id=CVE-2024-8696): A remote code execution (RCE) vulnerability via crafted extension publisher-url/additional-urls could be abused by a malicious extension in Docker Desktop before 4.34.2. [High]

No existing extensions exploiting the vulnerabilities were found in the Extensions Marketplace. The Docker team will be closely monitoring and diligently reviewing any requests for publishing new extensions.
+When [SSO enforcement](/manuals/enterprise/security/single-sign-on/connect.md) was first introduced, Docker provided a grace period to continue to let passwords be used on the Docker CLI when authenticating to Docker Hub. This was allowed so organizations could more easily use SSO enforcement. It is recommended that administrators configuring SSO encourage users using the CLI [to switch over to Personal Access Tokens](/manuals/enterprise/security/single-sign-on/_index.md#prerequisites) in anticipation of this grace period ending. On September 16, 2024 the grace period will end and passwords will no longer be able to authenticate to Docker Hub via the Docker CLI when SSO is enforced. Affected users are required to switch over to using PATs to continue signing in. At Docker, we want the experience to be the most secure for our developers and organizations and this deprecation is an essential step in that direction. +## SOC 2 Type 2 attestation and ISO 27001 certification + +_Last updated June, 2024_ + +Docker is pleased to announce that we have received our SOC 2 Type 2 attestation and ISO 27001 certification with no exceptions or major non-conformities. + +Security is a fundamental pillar to Docker’s operations, which is embedded into our overall mission and company strategy. Docker’s products are core to our user community and our SOC 2 Type 2 attestation and ISO 27001 certification demonstrate Docker’s ongoing commitment to security to our user base. + +For more information, see the [Blog announcement](https://www.docker.com/blog/docker-announces-soc-2-type-2-attestation-iso-27001-certification/). + ## Docker Security Advisory: Multiple Vulnerabilities in runc, BuildKit, and Moby _Last updated February 2, 2024_ @@ -61,11 +91,11 @@ If you are unable to update to an unaffected version promptly, follow these best * Only use trusted Docker images (such as [Docker Official Images](../docker-hub/image-library/trusted-content.md#docker-official-images)). * Don’t build Docker images from untrusted sources or untrusted Dockerfiles. -* If you are a Docker Business customer using Docker Desktop and unable to update to v4.27.1, make sure to enable [Hardened Docker Desktop](/manuals/security/for-admins/hardened-desktop/_index.md) features such as: - * [Enhanced Container Isolation](/manuals/security/for-admins/hardened-desktop/enhanced-container-isolation/_index.md), which mitigates the impact of CVE-2024-21626 in the case of running containers from malicious images. - * [Image Access Management](for-admins/hardened-desktop/image-access-management.md), and [Registry Access Management](/manuals/security/for-admins/hardened-desktop/registry-access-management.md), which give organizations control over which images and repositories their users can access. +* If you are a Docker Business customer using Docker Desktop and unable to update to v4.27.1, make sure to enable [Hardened Docker Desktop](/manuals/enterprise/security/hardened-desktop/_index.md) features such as: + * [Enhanced Container Isolation](/manuals/enterprise/security/hardened-desktop/enhanced-container-isolation/_index.md), which mitigates the impact of CVE-2024-21626 in the case of running containers from malicious images. + * [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md), and [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md), which give organizations control over which images and repositories their users can access. 
* For CVE-2024-23650, CVE-2024-23651, CVE-2024-23652, and CVE-2024-23653, avoid using BuildKit frontend from an untrusted source. A frontend image is usually specified as the #syntax line on your Dockerfile, or with `--frontend` flag when using the `buildctl build` command.
-* To mitigate CVE-2024-24557, make sure to either use BuildKit or disable caching when building images. From the CLI this can be done via the `DOCKER_BUILDKIT=1` environment variable (default for Moby >= v23.0 if the buildx plugin is installed) or the `--no-cache flag`. If you are using the HTTP API directly or through a client, the same can be done by setting `nocache` to `true` or `version` to `2` for the [/build API endpoint](https://docs.docker.com/engine/api/v1.44/#tag/Image/operation/ImageBuild).
+* To mitigate CVE-2024-24557, make sure to either use BuildKit or disable caching when building images. From the CLI this can be done via the `DOCKER_BUILDKIT=1` environment variable (default for Moby >= v23.0 if the buildx plugin is installed) or the `--no-cache` flag. If you are using the HTTP API directly or through a client, the same can be done by setting `nocache` to `true` or `version` to `2` for the [/build API endpoint](https://docs.docker.com/reference/api/engine/version/v1.44/#tag/Image/operation/ImageBuild). See the example after this list.
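+
+As a minimal illustration of the CLI mitigation (the tag `my-image` is a placeholder, not a required name):
+
+```console
+# Build with BuildKit, which is not affected by the cache poisoning in CVE-2024-24557:
+$ DOCKER_BUILDKIT=1 docker build -t my-image .
+
+# Or keep the legacy builder and bypass the local build cache entirely:
+$ docker build --no-cache -t my-image .
+```

### Technical details and impact
diff --git a/content/manuals/subscription/_index.md b/content/manuals/subscription/_index.md
index 7cfa0d8d1f16..a3c9d4c6649a 100644
--- a/content/manuals/subscription/_index.md
+++ b/content/manuals/subscription/_index.md
@@ -1,7 +1,7 @@
---
title: Subscription
description: Learn about subscription features and how to manage your subscription
-keywords: Docker, pricing, billing, Pro, Team, business, subscription, tier, plan
+keywords: Docker, pricing, billing, Pro, Team, business, subscription, tier
weight: 50
params:
sidebar:
@@ -20,7 +20,7 @@ grid_subscriptions:
link: /subscription/scale/
icon: leaderboard
- title: Change your subscription
-    description: Learn how to upgrade or downgrade your plan.
+    description: Learn how to upgrade or downgrade your subscription.
link: /subscription/change/
icon: upgrade
- title: Manage seats
diff --git a/content/manuals/subscription/change.md b/content/manuals/subscription/change.md
index 6328ab3f6c6d..6d9518a3659d 100644
--- a/content/manuals/subscription/change.md
+++ b/content/manuals/subscription/change.md
@@ -1,6 +1,6 @@
---
description: Learn how to change your Docker subscription
-keywords: Docker Hub, upgrade, downgrade, subscription, Pro, Team, business, pricing plan
+keywords: Docker Hub, upgrade, downgrade, subscription, Pro, Team, business, pricing
title: Change your subscription
aliases:
- /docker-hub/upgrade/
@@ -12,19 +12,20 @@ aliases:
- /docker-hub/cancel-downgrade/
- /docker-hub/billing/downgrade/
- /billing/scout-billing/
+- /billing/subscription-management/
weight: 30
---

-{{< include "tax-compliance.md" >}}
+{{% include "tax-compliance.md" %}}

The following sections describe how to change plans when you have a Docker
-subscription plan or legacy Docker subscription plan.
+subscription or legacy Docker subscription.

> [!NOTE]
>
> Legacy Docker plans apply to Docker subscribers who last purchased or renewed
> their subscription before December 10, 2024. These subscribers will keep
-> their current plan and pricing until their next renewal date that falls on or
+> their current subscription and pricing until their next renewal date that falls on or
> after December 10, 2024. To see purchase or renewal history, view your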
> [billing history](../billing/history.md). For more details about legacy
> subscriptions, see [Announcing Upgraded Docker
@@ -32,26 +33,31 @@ subscription plan or legacy Docker subscription plan.

## Upgrade your subscription

-When you upgrade a Docker plan, you immediately have access to all the features and entitlements available in your Docker subscription plan. For detailed information on features available in each subscription, see [Docker Pricing](https://www.docker.com/pricing).
+When you upgrade a Docker subscription, you immediately have access to all the features and entitlements available in your Docker subscription. For detailed information on features available in each subscription, see [Docker Pricing](https://www.docker.com/pricing).

{{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}

To upgrade your Docker subscription:

-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. Optional. If you're upgrading from a free Personal plan to a Team plan and want to keep your username, [convert your user account into an organization](../admin/organization/convert-account.md).
-4. Select the account you want to upgrade in the drop-down at the top-left of the page.
-5. Select **Upgrade**.
-6. Follow the on-screen instructions to complete your upgrade.
+1. Sign in to [Docker Home](https://app.docker.com/) and select the organization
+you want to upgrade.
+1. Select **Billing**.
+1. Optional. If you're upgrading from a free Personal subscription to a Team subscription and want to keep your username, [convert your user account into an organization](../admin/organization/convert-account.md).
+1. Select **Upgrade**.
+1. Follow the on-screen instructions to complete your upgrade.
+
+> [!NOTE]
+>
+> If you choose to pay using a US bank account, you must verify the account. For
+> more information, see [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account).

{{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}

-You can upgrade a legacy Docker Core, Docker Build Cloud, or Docker Scout subscription plan to a Docker subscription plan that includes access to all tools.
+You can upgrade a legacy Docker Core, Docker Build Cloud, or Docker Scout subscription to a Docker subscription that includes access to all tools.

-Contact [Docker sales](https://www.docker.com/pricing/contact-sales/) to upgrade your legacy Docker plan.
+Contact [Docker sales](https://www.docker.com/pricing/contact-sales/) to upgrade your legacy Docker subscription.

{{< /tab >}}
{{< /tabs >}}
@@ -70,42 +76,41 @@ When you downgrade your subscription, access to paid features is available until

> - SSO and SCIM: If you want to downgrade a Docker Business subscription and your organization uses single sign-on (SSO) for user authentication, you need to remove your SSO connection and verified domains before downgrading. After removing the SSO connection, any organization members that were auto-provisioned (for example, with SCIM) need to set up a password to sign in without SSO. To do this, users can [reset their password at sign in](/accounts/create-account/#reset-your-password-at-sign-in).

{{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}

If you have a [sales-assisted Docker Business subscription](details.md#sales-assisted), contact your account manager to downgrade your subscription.
To downgrade your Docker subscription:

-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. Select the account you want to downgrade in the drop-down at the top-left of the page.
-4. Select the action icon and then **Cancel subscription**.
-5. Review the cancellation warnings, then select **Confirm cancellation**.
-6. Optional. Fill out the feedback survey, or select **Skip**.
+1. Sign in to [Docker Home](https://app.docker.com/) and select
+the organization you want to downgrade.
+1. Select **Billing**.
+1. Select the action icon and then **Cancel subscription**.
+1. Fill out the feedback survey to continue with cancellation.

{{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}

If you have a [sales-assisted Docker Business subscription](details.md#sales-assisted), contact your account manager to downgrade your subscription.

-### Downgrade Legacy Docker plan
+### Downgrade Legacy Docker subscription

To downgrade your legacy Docker subscription:

-1. Sign in to [Docker Hub Billing](https://hub.docker.com/billing).
-2. Select the account you want to downgrade in the drop-down at the top-left of the page.
-3. Select the link to **Manage this account on Docker Hub**.
-4. In the plan section, select **Change plan**.
-5. Follow the on-screen instructions to complete your downgrade.
+1. Sign in to [Docker Hub](https://hub.docker.com/billing).
+1. Select the organization you want to downgrade, then select **Billing**.
+1. To downgrade, navigate to the upgrade plan page by selecting **Upgrade**.
+1. On the upgrade page, select **Downgrade** in the **Free Team** plan card.
+1. Follow the on-screen instructions to complete your downgrade.

### Downgrade Docker Build Cloud subscription

To downgrade your Docker Build Cloud subscription:

-1. Sign in to [Docker Home](https://app.docker.com) and open **Docker Build Cloud**.
-2. Select **Account settings**, then **Downgrade**.
-3. To confirm your downgrade, type **DOWNGRADE** in the text field and select **Yes, continue**.
-4. The account settings page will update with a notification bar notifying you of your downgrade date (start of next billing cycle).
+1. Sign in to [Docker Home](https://app.docker.com) and select **Build Cloud**.
+1. Select **Account settings**, then **Downgrade**.
+1. To confirm your downgrade, type **DOWNGRADE** in the text field and select **Yes, continue**.
+1. The account settings page displays a notification bar with your downgrade date (the start of the next billing cycle).

{{< /tab >}}
{{< /tabs >}}
diff --git a/content/manuals/subscription/details.md b/content/manuals/subscription/details.md
index b4158c92bf09..5cf7a39fd903 100644
--- a/content/manuals/subscription/details.md
+++ b/content/manuals/subscription/details.md
@@ -8,7 +8,7 @@ aliases:
weight: 10
---

-Docker subscription plans empower development teams by providing the tools they need to ship secure, high-quality apps — fast. These plans include access to Docker's suite of products:
+Docker subscriptions empower development teams by providing the tools they need to ship secure, high-quality apps — fast. These subscriptions include access to Docker's suite of products:

- [Docker Desktop](../desktop/_index.md): The industry-leading container-first
  development solution that includes, Docker Engine, Docker CLI, Docker Compose,
and more.

The following sections describe some of the key features included with your
-Docker subscription plan or Legacy Docker plan.
+Docker subscription or Legacy Docker subscription.

> [!NOTE]
>
-> Legacy Docker plans apply to Docker subscribers who last purchased or renewed their subscription before December 10, 2024. These subscribers will keep their current plan and pricing until their next renewal date that falls on or after December 10, 2024. To see purchase or renewal history, view your [billing history](../billing/history.md). For more details about Docker legacy plans, see [Announcing Upgraded Docker Plans](https://www.docker.com/blog/november-2024-updated-plans-announcement/). In addition to current features, Docker maintains a [public roadmap](https://github.com/docker/roadmap) so subscribers can see what new
-features are in development, as well as request new capabilities.
+> Legacy Docker plans apply to Docker subscribers who last purchased or renewed their subscription before December 10, 2024. These subscribers will keep their current subscription and pricing until their next renewal date that falls on or after December 10, 2024. To see purchase or renewal history, view your [billing history](../billing/history.md). For more details about legacy Docker subscriptions, see [Announcing Upgraded Docker Plans](https://www.docker.com/blog/november-2024-updated-plans-announcement/).

{{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}

## Docker Personal

Docker Personal includes:

- 1 included repository with continuous vulnerability analysis in Docker Scout
- Unlimited public Docker Hub repositories
-- 1 private Docker Hub repository with 2GB storage
-- 40 pulls per hour Docker Hub image pull rate limit
+- 200 pulls per 6 hours Docker Hub image pull rate limit for authenticated users
- 7-day Docker Build Cloud trial
- 7-day Testcontainers Cloud trial

Docker Personal users who want to continue using Docker Build Cloud or Docker
-Testcontainers Cloud after their trial can upgrade to a Docker Pro plan at any
+Testcontainers Cloud after their trial can upgrade to a Docker Pro subscription at any
time.

-All unauthenticated user, including unauthenticated Docker Personal users, get
-10 pulls per hour per IP address.
+All unauthenticated users, including unauthenticated Docker Personal users, get
+100 pulls per 6 hours per IPv4 address or IPv6 /64 subnet.

For a list of features available in each tier, see [Docker Pricing](https://www.docker.com/pricing/).
@@ -69,11 +67,11 @@ Testcontainers Cloud.

Docker Pro includes:

-- 200 Docker Build Cloud minutes per month.
+- 200 Docker Build Cloud build minutes per month. Docker Build Cloud minutes do not
+roll over month to month.
- 2 included repositories with continuous vulnerability analysis in Docker Scout.
-- 100 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or for CI.
+- 100 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- No Docker Hub image pull rate limits.
-- 25K Docker Hub pulls per month are included.

For a list of features available in each tier, see [Docker Pricing](https://www.docker.com/pricing/).
@@ -89,11 +87,11 @@ Docker Hub, Docker Scout, Docker Build Cloud, and Testcontainers Cloud.

Docker Team includes:

-- 500 Docker Build Cloud minutes per month.
+- 500 Docker Build Cloud build minutes per month. Docker Build Cloud minutes do not
+roll over month to month.
- Unlimited Docker Scout repositories with continuous vulnerability analysis.
-- 500 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or for CI.
+- 500 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- No Docker Hub image pull rate limits.
-- 100K Docker Hub pulls per month.

There are also advanced collaboration and management tools, including organization and team management with [Role Based Access Control
@@ -113,20 +111,19 @@ Build Cloud, and Testcontainers Cloud.

Docker Business includes:

-- 1500 Docker Build Cloud minutes per month.
+- 1500 Docker Build Cloud build minutes per month. Docker Build Cloud minutes do not
+roll over month to month.
- Unlimited Docker Scout repositories with continuous vulnerability analysis.
-- 1500 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or
- for CI.
+- 1500 Testcontainers Cloud runtime minutes per month for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- No Docker Hub image pull rate limits.
-- 1M Docker Hub pulls per month.

In addition, you gain access to enterprise-grade features, such as:

-- [Hardened Docker Desktop](../security/for-admins/hardened-desktop/_index.md)
+- [Hardened Docker Desktop](/manuals/enterprise/security/hardened-desktop/_index.md)
- [Image Access
-  Management](../security/for-admins/hardened-desktop/image-access-management.md)
+  Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md)
  which lets admins control what content developers can access
- [Registry Access
-  Management](../security/for-admins/hardened-desktop/registry-access-management.md)
+  Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md)
  which lets admins control what registries developers can access
- [Company layer](/admin/company/) to manage multiple organizations and settings
- [Single sign-on](/security/for-admins/single-sign-on/)
@@ -148,7 +145,7 @@ You can:

## Sales-assisted

-A sales-assisted plan refers to a Docker Business or Team subscription where everything is set up and
+A sales-assisted subscription refers to a Docker Business or Team subscription where everything is set up and
managed by a dedicated Docker account manager.

{{< /tab >}}
@@ -157,7 +154,7 @@ managed by a dedicated Docker account manager.

> [!IMPORTANT]
>
> As of December 10, 2024, Docker Core, Docker Build Cloud, and Docker Scout
-> subscription plans are no longer available and have been replaced by Docker subscription
+> subscriptions are no longer available and have been replaced by Docker subscription
> plans that provide access to all tools. If you subscribed or renewed
> your subscriptions before December 10, 2024, your legacy Docker
> plans still apply to your account until you renew. For more details,
@@ -179,7 +176,7 @@ their customers.

Legacy Docker Pro includes:

- Unlimited public repositories
-- Unlimited [Scoped Access Tokens](/security/for-developers/access-tokens/)
+- Unlimited [Scoped Access Tokens](/security/access-tokens/)
- Unlimited [collaborators](/docker-hub/repos/manage/access/#collaborators) for public repositories at no cost per month.
- Access to [Legacy Docker Scout Free](#legacy-docker-scout-free) to get started with software supply chain security.
- Unlimited private repositories
@@ -189,15 +186,14 @@ Legacy Docker Pro includes:

For a list of features available in each legacy tier, see [Legacy Docker Pricing](https://www.docker.com/legacy-pricing/).

-#### Upgrade your Legacy Docker Pro plan
+#### Upgrade your Legacy Docker Pro subscription

-When you upgrade your Legacy Docker Pro plan to a Docker Pro subscription plan, your plan includes the following changes:
+When you upgrade your Legacy Docker Pro subscription to a Docker Pro subscription, your subscription includes the following changes:

-- Docker Build Cloud minutes increased from 100/month to 200/month and no monthly fee.
+- Docker Build Cloud build minutes increased from 100/month to 200/month and no monthly fee. Docker Build Cloud minutes do not roll over month to month.
- 2 included repositories with continuous vulnerability analysis in Docker Scout.
-- 100 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI.
+- 100 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- Docker Hub image pull rate limits are removed.
-- 25K Docker Hub pulls per month are included.

For a list of features available in each tier, see [Docker Pricing](https://www.docker.com/pricing/).
@@ -221,16 +217,15 @@ There are also advanced collaboration and management tools, including organizati

For a list of features available in each legacy tier, see [Legacy Docker Pricing](https://www.docker.com/legacy-pricing/).

-#### Upgrade your Legacy Docker Team plan
+#### Upgrade your Legacy Docker Team subscription

-When you upgrade your Legacy Docker Team plan to a Docker Team subscription plan, your plan includes the following changes:
+When you upgrade your Legacy Docker Team subscription to a Docker Team subscription, your subscription includes the following changes:

-- Instead of paying an additional per-seat fee, Docker Build Cloud is now available to all users in your Docker plan.
-- Docker Build Cloud minutes increase from 400/mo to 500/mo.
+- Instead of paying an additional per-seat fee, Docker Build Cloud is now available to all users in your Docker subscription.
+- Docker Build Cloud build minutes increase from 400/mo to 500/mo. Docker Build Cloud minutes do not roll over month to month.
- Docker Scout now includes unlimited repositories with continuous vulnerability analysis, an increase from 3.
-- 500 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI.
+- 500 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- Docker Hub image pull rate limits are removed.
-- 100K Docker Hub pulls per month are included.
- The minimum number of users is 1 (lowered from 5).

For a list of features available in each tier, see [Docker Pricing](https://www.docker.com/pricing/).
@@ -245,25 +240,24 @@ use of Docker components including Docker Desktop and Docker Hub.
Legacy Docker Business includes:

- Everything included in legacy Docker Team
-- [Hardened Docker Desktop](../security/for-admins/hardened-desktop/_index.md)
-- [Image Access Management](../security/for-admins/hardened-desktop/image-access-management.md) which lets admins control what content developers can access
-- [Registry Access Management](../security/for-admins/hardened-desktop/registry-access-management.md) which lets admins control what registries developers can access
+- [Hardened Docker Desktop](/manuals/enterprise/security/hardened-desktop/_index.md)
+- [Image Access Management](/manuals/enterprise/security/hardened-desktop/image-access-management.md) which lets admins control what content developers can access
+- [Registry Access Management](/manuals/enterprise/security/hardened-desktop/registry-access-management.md) which lets admins control what registries developers can access
- [Company layer](/admin/company/) to manage multiple organizations and settings
- [Single Sign-On](/security/for-admins/single-sign-on/)
- [System for Cross-domain Identity Management](/security/for-admins/provisioning/scim/) and more.

For a list of features available in each tier, see [Legacy Docker Pricing](https://www.docker.com/legacy-pricing/).

-#### Upgrade your Legacy Docker Business plan
+#### Upgrade your Legacy Docker Business subscription

-When you upgrade your Legacy Docker Business plan to a Docker Business subscription plan, your plan includes the following changes:
+When you upgrade your Legacy Docker Business subscription to a Docker Business subscription, your subscription includes the following changes:

-- Instead of paying an additional per-seat fee, Docker Build Cloud is now available to all users in your Docker plan.
-- Docker Build Cloud included minutes increase from 800/mo to 1500/mo.
+- Instead of paying an additional per-seat fee, Docker Build Cloud is now available to all users in your Docker subscription.
+- Docker Build Cloud included minutes increase from 800/mo to 1500/mo. Docker Build Cloud minutes do not roll over month to month.
- Docker Scout now includes unlimited repositories with continuous vulnerability analysis, an increase from 3.
-- 1500 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI.
+- 1500 Testcontainers Cloud runtime minutes are now included for use either in Docker Desktop or for CI. Testcontainers Cloud runtime minutes do not roll over month to month.
- Docker Hub image pull rate limits are removed.
-- 1M Docker Hub pulls per month are included.

For a list of features available in each tier, see [Docker Pricing](https://www.docker.com/pricing/).
@@ -282,20 +276,20 @@ A sales-assisted Docker Business subscription where everything is set up and man

## Legacy Docker Scout subscriptions

-This section provides an overview of the legacy subscription plans for Docker
+This section provides an overview of the legacy subscriptions for Docker
Scout.

> [!IMPORTANT]
>
> As of December 10, 2024, Docker Scout subscriptions are no longer available
-> and have been replaced by Docker subscription plans that provide access to
+> and have been replaced by Docker subscriptions that provide access to
> all tools. If you subscribed or renewed your subscriptions before December 10, 2024, your legacy Docker subscriptions still apply to your account until
> you renew. For more details, see [Announcing Upgraded Docker
> Plans](https://www.docker.com/blog/november-2024-updated-plans-announcement/).
### Legacy Docker Scout Free -Legacy Docker Scout Free is available for organizations. If you have a Legacy Docker plan, you automatically have access to legacy Docker Scout Free. +Legacy Docker Scout Free is available for organizations. If you have a Legacy Docker subscription, you automatically have access to legacy Docker Scout Free. Legacy Docker Scout Free includes: @@ -319,10 +313,10 @@ Legacy Docker Scout Business includes: - All the features available in legacy Docker Scout Team - Unlimited Docker Scout-enabled repositories -### Upgrade your Legacy Docker Scout plan +### Upgrade your Legacy Docker Scout subscription -When you upgrade your Legacy Docker Scout plan to a Docker subscription plan, your -plan includes the following changes: +When you upgrade your Legacy Docker Scout subscription to a Docker subscription, your +subscription includes the following changes: - Docker Business: Unlimited repositories with continuous vulnerability analysis, an increase from 3. - Docker Team: Unlimited repositories with continuous vulnerability analysis, an increase from 3 @@ -339,16 +333,16 @@ For a list of features available in each tier, see [Docker Pricing](https://www. > [!IMPORTANT] > > As of December 10, 2024, Docker Build Cloud is only available with the -> new Docker Pro, Team, and Business plans. When your plan renews on or after +> new Docker Pro, Team, and Business plans. When your subscription renews on or after > December 10, 2024, you will see an increase in your included Build Cloud > minutes each month. For more details, see [Announcing Upgraded Docker > Plans](https://www.docker.com/blog/november-2024-updated-plans-announcement/). ### Legacy Docker Build Cloud Starter -If you have a Legacy Docker plan, a base level of Build Cloud +If you have a Legacy Docker subscription, a base level of Build Cloud minutes and cache are included. The features available vary depending on your -Legacy Docker plan subscription tier. +Legacy Docker subscription tier. #### Legacy Docker Pro @@ -384,11 +378,11 @@ the organization associated with the subscription. See Manage seats and invites. For more details about your enterprise subscription, [contact sales](https://www.docker.com/products/build-cloud/#contact_sales). -### Upgrade your Legacy Docker Build Cloud plan +### Upgrade your Legacy Docker Build Cloud subscription -You no longer need to subscribe to a separate Docker Build Cloud plan to access -Docker Build Cloud or to scale your minutes. When you upgrade your Legacy Docker plan to -a Docker subscription plan, your plan includes the following changes: +You no longer need to subscribe to a separate Docker Build Cloud subscription to access +Docker Build Cloud or to scale your minutes. When you upgrade your Legacy Docker subscription to +a Docker subscription, your subscription includes the following changes: - Docker Business: Included minutes are increased from 800/mo to 1500/mo with the option to scale more minutes. - Docker Team: Included minutes are increased from 400/mo to 500/mo with the option to scale more minutes. diff --git a/content/manuals/subscription/manage-seats.md b/content/manuals/subscription/manage-seats.md index 5cadecf411a0..dfa6410478b9 100644 --- a/content/manuals/subscription/manage-seats.md +++ b/content/manuals/subscription/manage-seats.md @@ -17,12 +17,12 @@ You can add seats at anytime to your existing subscription. When you add seats to your subscription in the middle of your billing cycle, you are charged a prorated amount for the additional seats. 
-{{< include "tax-compliance.md" >}}
+{{% include "tax-compliance.md" %}}

## Add seats

{{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}

> [!IMPORTANT]
>
@@ -30,33 +30,38 @@
To add seats to your subscription:

-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. Select your account from the drop-down menu in the top-left.
-4. Select **Add seats**.
-5. Follow the on-screen instructions to complete adding seats.
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Billing**.
+1. Select **Add seats** and follow the on-screen instructions to complete
+adding seats.
+
+> [!NOTE]
+>
+> If you choose to pay using a US bank account, you must verify the account. For
+> more information, see [Verify a bank account](/manuals/billing/payment-method.md#verify-a-bank-account).

You can now add more members to your organization. For more information, see [Manage organization members](../admin/organization/members.md).

{{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}

> [!IMPORTANT]
>
> If you have a [sales-assisted Docker Business subscription](details.md#sales-assisted), contact your account manager to add seats to your subscription.

-### Add seats to Legacy Docker plan
+### Add seats to Legacy Docker subscription

1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select your avatar in the top-left, and select **Billing** from the drop-down menu.
-3. On the Billing page, select **Add seats**.
-4. Select the number of seats you want to add, then select **Purchase**.
+1. Select your organization, then select **Billing**.
+1. On the Billing page, select **Add seats**.
+1. Select the number of seats you want to add, then select **Purchase**.

### Add seats to Docker Build Cloud

-1. Sign in to Docker Build Cloud.
-2. Select **Account settings**, then **Add seats**.
-3. Select the number of seats you want to add, then select **Add seats**.
+1. Sign in to [Docker Home](https://app.docker.com) and select **Build Cloud**.
+1. Select **Account settings**, then **Add seats**.
+1. Select the number of seats you want to add, then select **Add seats**.

{{< /tab >}}
{{< /tabs >}}
@@ -74,7 +79,7 @@ If you remove seats in the middle of the billing cycle, changes apply in the nex

For example, if you receive your billing on the 8th of every month for 10 seats and you want to remove 2 seats on the 15th of the month, the 2 seats will be removed from your subscription the next month. Your payment for 8 seats begins on the next billing cycle. If you're on the annual subscription, the 2 seats are still available until the next year, and your payment for the 8 seats begins on the next billing cycle.

{{< tabs >}}
-{{< tab name="Docker plan" >}}
+{{< tab name="Docker subscription" >}}

> [!IMPORTANT]
>
@@ -82,33 +87,33 @@
To remove seats:

-1. Sign in to [Docker Home](https://app.docker.com/).
-2. Under Settings and administration, select **Billing**.
-3. Select your account from the drop-down menu in the top-left.
-4. Select the action icon and then select **Remove seats**.
-5. Follow the on-screen instructions to complete removing seats.
+1. Sign in to [Docker Home](https://app.docker.com/) and select your
+organization.
+1. Select **Billing**.
+1. In the **Seats** row, select the action icon, then **Remove seats**.
+1. Follow the on-screen instructions to complete removing seats.

You can cancel the removal of seats before your next billing cycle. To do so, select **Cancel change**.

{{< /tab >}}
-{{< tab name="Legacy Docker plan" >}}
+{{< tab name="Legacy Docker subscription" >}}

> [!IMPORTANT]
>
> If you have a [sales-assisted Docker Business subscription](details.md#sales-assisted), contact your account manager to remove seats from your subscription.

-### Remove seats from Legacy Docker plan
+### Remove seats from Legacy Docker subscription

1. Sign in to [Docker Hub](https://hub.docker.com).
-2. Select your avatar in the top-left, and select **Billing** from the drop-down menu.
-3. On the Billing page, select **Remove seats**.
-4. Follow the on-screen instructions to complete removing seats.
+1. Select your organization, then select **Billing**.
+1. On the Billing page, select **Remove seats**.
+1. Follow the on-screen instructions to complete removing seats.

### Remove seats from Docker Build Cloud

-1. Sign in to [Docker Build Cloud](https://app.docker.com/build).
-2. Select **Account settings**, then **Remove seats**.
-3. Follow the on-screen instructions to complete removing seats.
+1. Sign in to [Docker Home](https://app.docker.com) and select **Build Cloud**.
+1. Select **Account settings**, then **Remove seats**.
+1. Follow the on-screen instructions to complete removing seats.

{{< /tab >}}
{{< /tabs >}}
\ No newline at end of file
diff --git a/content/manuals/subscription/scale.md b/content/manuals/subscription/scale.md
index 9f3f51f2e7f5..5e73e79c97bb 100644
--- a/content/manuals/subscription/scale.md
+++ b/content/manuals/subscription/scale.md
@@ -1,18 +1,18 @@
---
description: Learn how to scale your Docker subscription
-keywords: subscription, Pro, Team, business, pricing plan, build minutes, test container minutes, pull limit
+keywords: subscription, Pro, Team, business, pricing, build minutes, test container minutes, pull limit
title: Scale your subscription
weight: 17
---

> [!NOTE]
>
-> Owners of legacy Docker subscription plans must upgrade their subscription to a new
-> Docker subscription plan in order to scale their subscription.
+> Owners of legacy Docker subscriptions must upgrade their subscription to a new
+> Docker subscription in order to scale their subscription.
>
-> Legacy Docker plans apply to Docker subscribers who last purchased or renewed
+> Legacy Docker subscriptions apply to Docker subscribers who last purchased or renewed
> their subscription before December 10, 2024. These subscribers will keep
-> their current plan and pricing until their next renewal date that falls on or
+> their current subscription and pricing until their next renewal date that falls on or
-> after December 10, 2024. To see purchase or renewal history, view your
-> [billing history](../billing/history.md). For more details about legacy
+> after December 10, 2024. For more details about legacy
> subscriptions, see [Announcing Upgraded Docker
See [Docker subscriptions and features](./details.md) to learn how much base consumption comes with each subscription. You can scale your -consumption at any time during your subscription period. All purchased -consumption expires at the end of your subscription term. -amount of consumption. You can scale your consumption at any time during your subscription period. All purchased consumption expires at the end of your subscription term. +consumption at any time during your subscription period. You can scale consumption for the following: -- Docker Build Cloud minutes +- Docker Build Cloud build minutes - Docker Testcontainers Cloud runtime minutes -- Docker Hub storage and image pulls To better understand your needs, you can view your consumption at any time. For more details, see [View Docker product usage](../admin/organization/manage-products.md#view-docker-product-usage). -## Add Docker Build Cloud minutes +> [!WARNING] +> +> The number of Docker Build Cloud and Testcontainers minutes included in your +subscription do not rollover. Additional minutes expire at the end of your +subscription period (monthly or annually). For example, if you have an annual +Docker Team subscription with 500 included minutes, and purchase 500 additional +minutes, only the 500 additional minutes rollover until the end of your annual +subscription period. + +## Add Docker Build Cloud build minutes -You can pre-purchase Docker Build Cloud minutes in the Docker Build Cloud Dashboard: +You can pre-purchase Docker Build Cloud build minutes in the Docker Build Cloud Dashboard: -1. Sign in to [Docker Home](https://app.docker.com/). -2. Under Settings and administration, select **Billing**. -3. On the plans and usage page, select **View build minutes**. - This will launch the Docker Build Cloud settings page. -4. Select **Add minutes**. -5. Select your additional minute amount, then **Continue to payment**. -6. Enter your payment details and billing address. -7. Review your order and select **Pay**. +1. Sign in to [Docker Home](https://app.docker.com/) and choose +your organization. +1. Select **Build Cloud**, then **Build minutes**. +1. Select **Add prepaid minutes**. +1. Select your additional minute amount, then **Continue to payment**. +1. Enter your payment details and billing address. +1. Review your order and select **Pay**. Your additional minutes will now display on the Build minutes page. @@ -60,16 +65,4 @@ You can pre-purchase Testcontainers Cloud runtime minutes by [contacting pre-purchase, you are able to use as many minutes as you need on-demand. The usage will be billed at the end of each monthly billing cycle. On-demand usage is billed at a higher rate than pre-purchased capacity. To avoid on-demand -charges, pre-purchase additional minutes. - -## Add Docker Hub storage or image pulls - -{{< include "hub-limits.md" >}} - -You can pre-purchase images pulls and storage by [contacting -sales](https://www.docker.com/pricing/contact-sales/). - -In addition to pre-purchase, you are able to use as much resources as you need -on-demand. On-demand usage is billed at a higher rate than pre-purchased -capacity. To avoid on-demand charges, pre-purchase additional storage or image -pulls. \ No newline at end of file +charges, pre-purchase additional minutes. 
\ No newline at end of file diff --git a/content/manuals/subscription/setup.md b/content/manuals/subscription/setup.md index b87bb574db95..22670088e3dc 100644 --- a/content/manuals/subscription/setup.md +++ b/content/manuals/subscription/setup.md @@ -1,6 +1,6 @@ --- description: Learn how to set up your Docker subscription -keywords: subscription, Pro, Team, Business, pricing plan +keywords: subscription, Pro, Team, Business, pricing title: Set up your subscription weight: 15 --- @@ -9,18 +9,18 @@ Docker subscriptions offer features and benefits to support both new and profess In this section, learn how to get started with a Docker subscription for individuals or for organizations. Before you begin, make sure you have a [Docker ID](../accounts/create-account.md). -{{< include "tax-compliance.md" >}} +{{% include "tax-compliance.md" %}} ## Set up a Docker subscription for a personal account -After you [create your Docker ID](../accounts/create-account.md), you have a Docker Personal subscription. To continue using this plan, no further action is necessary. For additional features, you can upgrade to a Docker Pro plan. +After you [create your Docker ID](../accounts/create-account.md), you have a Docker Personal subscription. To continue using this subscription, no further action is necessary. For additional features, you can upgrade to a Docker Pro subscription. To upgrade from Docker Personal to Docker Pro, see [Upgrade your subscription](./change.md#upgrade-your-subscription). ## Set up a Docker subscription for an organization -You can subscribe a new or existing organization to a Docker plan. Only organization owners can manage billing for the organization. +You can subscribe a new or existing organization to a Docker subscription. Only organization owners can manage billing for the organization. -After you [create your Docker ID](../accounts/create-account.md), you have a Docker Personal plan. You must then create an organization and choose a subscription for it. For more details, see [Create your organization](../admin/organization/orgs.md). +After you [create your Docker ID](../accounts/create-account.md), you have a Docker Personal subscription. You must then create an organization and choose a subscription for it. For more details, see [Create your organization](../admin/organization/orgs.md). To learn how to upgrade a Docker subscription for an existing organization, see [Upgrade your subscription](./change.md#upgrade-your-subscription). \ No newline at end of file diff --git a/content/manuals/testcontainers.md b/content/manuals/testcontainers.md new file mode 100644 index 000000000000..52a5c26560dd --- /dev/null +++ b/content/manuals/testcontainers.md @@ -0,0 +1,58 @@ +--- +title: Testcontainers +description: Learn how to use Testcontainers to run containers programmatically in your preferred programming language. +keywords: docker APIs, docker, testcontainers documentation, testcontainers, testcontainers oss, testcontainers oss documentation, + docker compose, docker-compose, java, golang, go +params: + sidebar: + group: Open source +intro: +- title: What is Testcontainers? 
+ description: Learn about what Testcontainers does and its key benefits + icon: feature_search + link: https://testcontainers.com/getting-started/#what-is-testcontainers +- title: The Testcontainers workflow + description: Understand the Testcontainers workflow + icon: explore + link: https://testcontainers.com/getting-started/#testcontainers-workflow +quickstart: +- title: Testcontainers for Go + description: A Go package that makes it simple to create and clean up container-based dependencies for automated integration/smoke tests. + icon: /icons/go.svg + link: https://golang.testcontainers.org/quickstart/ +- title: Testcontainers for Java + description: A Java library that supports JUnit tests, providing lightweight, throwaway instances of anything that can run in a Docker container. + icon: /icons/java.svg + link: https://java.testcontainers.org/ +--- + +Testcontainers is a set of open source libraries that provides easy and lightweight APIs for bootstrapping local development and test dependencies with real services wrapped in Docker containers. +Using Testcontainers, you can write tests that depend on the same services you use in production without mocks or in-memory services. + +{{< grid items=intro >}} + +## Quickstart + +### Supported languages + +Testcontainers provides support for the most popular languages, and Docker sponsors the development of the following Testcontainers implementations: + +- [Go](https://golang.testcontainers.org/quickstart/) +- [Java](https://java.testcontainers.org/quickstart/junit_5_quickstart/) + +The rest are community-driven and maintained by independent contributors. + +### Prerequisites + +Testcontainers requires a Docker-API compatible container runtime. +During development, Testcontainers is actively tested against recent versions of Docker on Linux, as well as against Docker Desktop on Mac and Windows. +These Docker environments are automatically detected and used by Testcontainers without any additional configuration. + +It is possible to configure Testcontainers to work for other Docker setups, such as a remote Docker host or Docker alternatives. +However, these are not actively tested in the main development workflow, so not all Testcontainers features might be available +and additional manual configuration might be necessary. + +If you have further questions about configuration details for your setup or whether it supports running Testcontainers-based tests, + contact the Testcontainers team and other users from the Testcontainers community on [Slack](https://slack.testcontainers.org/). + + {{< grid items=quickstart >}} diff --git a/content/manuals/unassociated-machines/_index.md b/content/manuals/unassociated-machines/_index.md new file mode 100644 index 000000000000..033e29f007af --- /dev/null +++ b/content/manuals/unassociated-machines/_index.md @@ -0,0 +1,213 @@ +--- +title: Manage unassociated machines +description: Learn how to manage unassociated machines using the Docker Admin Console +keywords: unassociated machines, insights, manage users, enforce sign-in +sitemap: false +pagefind_exclude: true +noindex: true +params: + sidebar: + group: Enterprise +--- + +{{% restricted title="About unassociated machines" %}} +Unassociated machines is a private feature. Your Docker +Account Executive must enable the feature for you. +{{% /restricted %}} + +Docker administrators can identify, view, and manage Docker Desktop machines +that are likely associated with their organization but aren't currently linked
+to user accounts.
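As a concrete companion to the Testcontainers quickstart above: the following is a minimal, hedged Go sketch using Testcontainers for Go, the Docker-sponsored module listed there. It spins up a throwaway Redis container, waits until the exposed port accepts connections, and tears the container down afterwards. The `redis:7` image and the port are illustrative assumptions, not requirements of the library.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

func main() {
	ctx := context.Background()

	// Start a disposable Redis container (illustrative choice of image);
	// Testcontainers pulls the image, starts the container, and blocks
	// until the exposed port is accepting connections.
	redis, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:        "redis:7",
			ExposedPorts: []string{"6379/tcp"},
			WaitingFor:   wait.ForListeningPort("6379/tcp"),
		},
		Started: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Clean up the container when done, as you would in a test's teardown.
	defer redis.Terminate(ctx)

	// Endpoint returns the host:port a test client would connect to.
	endpoint, err := redis.Endpoint(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Redis is listening on", endpoint)
}
```

In a real test you would run this inside a `TestXxx(t *testing.T)` function and hand `endpoint` to the client under test; the Java library follows the same pattern.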
This self-service capability helps you understand Docker +Desktop usage across your organization and streamline user onboarding without +IT involvement. + +## Prerequisites + +- Docker Business subscription +- Organization owner access to your Docker organization + +## About unassociated machines + +Unassociated machines are Docker Desktop instances that Docker has identified +as likely belonging to your organization based on usage patterns, but the users +are not signed in to Docker Desktop with an account that is part of your +organization. + +## How Docker identifies unassociated machines + +Docker uses telemetry data to identify which machines likely belong to your +organization: + +- Domain matching: Users signed in with email domains associated with your +organization +- Registry patterns: Analysis of container registry access patterns that +indicate organizational usage + +## View unassociated machines + +To see detailed information about unassociated machines: + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. + +The machine list displays: + +- Machine ID (Docker-generated identifier) +- The registry address used to predict whether a user is part of your +organization +- User email (only displays if the user is signed into Docker Desktop while +using it) +- Docker Desktop version +- Operating system (OS) +- Last activity date +- Sign-in enforced status + +You can: + +- Export the list as CSV +- Take actions on individual or multiple machines + +## Enable sign-in enforcement for unassociated machines + +> [!NOTE] +> +> Sign-in enforcement for unassociated machines is different from +the [organization-level sign-in enforcement](/enterprise/security/enforce-sign-in/) +available through `registry.json` and configuration profiles. This sign-in +enforcement only requires users to sign in so admins can identify who is +using the machine, meaning users can sign in with any email address. For more +stringent security controls that limit sign-ins to users who are already part +of your organization, see [Enforce sign-in](/enterprise/security/enforce-sign-in/). + +Sign-in enforcement helps you identify who is using unassociated machines in +your organization. When you enable enforcement, users on these machines will +be required to sign in to Docker Desktop. Once they sign in, their email +addresses will appear in the Unassociated list, allowing you to then add them +to your organization. + +> [!IMPORTANT] +> +> Sign-in enforcement only takes effect after Docker Desktop is restarted. +Users can continue using Docker Desktop until their next restart. + +### Enable sign-in enforcement for all unassociated machines + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. +1. Turn on the **Enforce sign-in** toggle. +1. In the pop-up modal, select **Require sign-in** to confirm. + +The **Sign-in required** status will update for all unassociated machines to +**Yes**. + +> [!NOTE] +> +> When you enable sign-in enforcement for all unassociated machines, any new +machines detected in the future will automatically have sign-in enforcement +enabled. Sign-in enforcement requires Docker Desktop version 4.41 or later. +Users with older versions will not be prompted to sign in and can continue +using Docker Desktop normally until they update. 
Their status shows +as **Pending** until they update to version 4.41 or later. + +### Enable sign-in enforcement for individual unassociated machines + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. +1. Locate the machine you want to enable sign-in enforcement for. +1. Select the **Actions** menu and choose **Turn on sign-in enforcement**. +1. In the pop-up modal, select **Require sign-in** to confirm. + +The **Sign-in required** status will update for the individual machine to +**Yes**. + +> [!NOTE] +> +> Sign-in enforcement requires Docker Desktop version 4.41 or later. Users +with older versions will not be prompted to sign in and can continue using +Docker Desktop normally until they update. Their status shows as **Pending** +until they update to version 4.41 or later. + +### What happens when users sign in + +After you enable sign-in enforcement: + +1. Users must restart Docker Desktop. Enforcement only takes effect after +restart. +1. When users open Docker Desktop, they see a sign-in prompt. They must sign +in to continue using Docker Desktop. +1. User email addresses appear in the **Unassociated** list. +1. You can add users to your organization. + +Users can continue using Docker Desktop immediately after signing in, even +before being added to your organization. + +## Add unassociated machines to your organization + +When users in your organization use Docker without signing in, their machines +appear in the **Unassociated** list. You can add these users to your +organization in two ways: + +- Automatic addition: + - Auto-provisioning: If you have verified domains with auto-provisioning + enabled, users who sign in with a matching email domain will automatically + be added to your organization. For more information on verifying domains and + auto-provisioning, see [Domain management](/manuals/enterprise/security/domain-management.md). + - SSO user provisioning: If you have SSO configured with + [Just-in-Time provisioning](/manuals/enterprise/security/provisioning/just-in-time.md), + users who sign in through your SSO connection will automatically be added + to your organization. +- Manual addition: If you don't have auto-provisioning or SSO set up, or if a +user's email domain doesn't match your configured domains, their email will +appear in the **Unassociated** list where you can choose to add them directly. + +> [!NOTE] +> +> If you add users and do not have enough seats in your organization, a +pop-up will appear prompting you to **Get more seats**. + +### Add individual users + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. +1. Locate the machine you want to add to your organization. +1. Select the **Actions** menu and choose **Add to organization**. +1. In the pop-up modal, select **Add user**. + +### Bulk add users + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. +1. Use the **checkboxes** to select the machines you want to add to your +organization. +1. Select the **Add to organization** button. +1. In the pop-up modal, select **Add users** to confirm. + +## Disable sign-in enforcement + +### Disable for all unassociated machines + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1.
In **User management**, select **Unassociated**. +1. Turn off the **Enforce sign-in** toggle. +1. In the pop-up modal, select **Turn off sign-in requirement** to confirm. + +The **Sign-in required** status will update for all unassociated machines to +**No**. + +### Disable for specific unassociated machines + +1. Sign in to the [Admin Console](https://app.docker.com/admin) and select +your organization. +1. In **User management**, select **Unassociated**. +1. Locate the machine you want to disable sign-in enforcement for. +1. Select the **Actions** menu and choose **Turn off sign-in enforcement**. +1. In the pop-up modal, select **Turn off sign-in requirement** to confirm. + +The **Sign-in required** status will update for the individual machine to +**No**. diff --git a/content/reference/_index.md b/content/reference/_index.md index b0b093cd702d..f39ef589bc5c 100644 --- a/content/reference/_index.md +++ b/content/reference/_index.md @@ -41,7 +41,11 @@ params: - title: DVP Data API description: API for Docker Verified Publishers to fetch analytics data. icon: area_chart - link: /reference/api/hub/dvp/ + link: /reference/api/dvp/latest/ + - title: Registry API + description: API for Docker Registry. + icon: database + link: /reference/api/registry/latest/ --- This section includes the reference documentation for the Docker platform's diff --git a/content/manuals/security/for-admins/_index.md b/content/reference/api/dvp/_index.md similarity index 51% rename from content/manuals/security/for-admins/_index.md rename to content/reference/api/dvp/_index.md index 7e60da1fad8d..18f6030ead2c 100644 --- a/content/manuals/security/for-admins/_index.md +++ b/content/reference/api/dvp/_index.md @@ -1,6 +1,6 @@ --- +title: DVP Data API build: render: never -title: For admins -weight: 10 --- + diff --git a/content/reference/api/dvp/changelog.md b/content/reference/api/dvp/changelog.md new file mode 100644 index 000000000000..0fc5f5b9dd89 --- /dev/null +++ b/content/reference/api/dvp/changelog.md @@ -0,0 +1,20 @@ +--- +description: Docker Verified Publisher API changelog +title: Docker Verified Publisher API changelog +linkTitle: Changelog +keywords: docker dvp, dvp, whats new, release notes, api, changelog +weight: 2 +toc_min: 1 +toc_max: 2 +--- + +Here you can learn about the latest changes, new features, bug fixes, and known +issues for Docker Verified Publisher API. + +--- + +## 2025-06-27 + +### New + +- Create changelog diff --git a/content/reference/api/dvp/deprecated.md b/content/reference/api/dvp/deprecated.md new file mode 100644 index 000000000000..a9d1330344b0 --- /dev/null +++ b/content/reference/api/dvp/deprecated.md @@ -0,0 +1,37 @@ +--- +description: Deprecated Docker Verified Publisher API endpoints +keywords: deprecated +title: Deprecated Docker Verified Publisher API endpoints +linkTitle: Deprecated +weight: 3 +--- + +This page provides an overview of endpoints that are deprecated in Docker Verified Publisher API. + +## Endpoint deprecation policy + +As changes are made to Docker there may be times when existing endpoints need to be removed or replaced with newer endpoints. Before an existing endpoint is removed it is labeled as "deprecated" within the documentation. After some time it may be removed. + +## Deprecated endpoints + +The following table provides an overview of the current status of deprecated endpoints: + +**Deprecated**: the endpoint is marked "deprecated" and should no longer be used. + +The endpoint may be removed, disabled, or change behavior in a future release. 
+ +**Removed**: the endpoint was removed, disabled, or hidden. + +--- + +| Status | Feature | Date | +|--------|---------------------------------------------------------------|------------| +| | [Create deprecation log table](#create-deprecation-log-table) | 2025-06-27 | + +--- + +### Create deprecation log table + +Reformat page + +--- \ No newline at end of file diff --git a/content/reference/api/hub/dvp.md b/content/reference/api/dvp/latest.md similarity index 73% rename from content/reference/api/hub/dvp.md rename to content/reference/api/dvp/latest.md index 1ff43e1dd783..1ef4fdf075ba 100644 --- a/content/reference/api/hub/dvp.md +++ b/content/reference/api/dvp/latest.md @@ -1,6 +1,9 @@ --- layout: api description: Reference documentation and Swagger (OpenAPI) specification for the Docker Verified Publisher API. -linkTitle: DVP Data API title: Docker Verified Publisher API reference +linkTitle: Latest +weight: 1 +aliases: + - /reference/api/hub/dvp/ --- diff --git a/content/reference/api/dvp/latest.yaml b/content/reference/api/dvp/latest.yaml new file mode 100644 index 000000000000..8ff2030acab3 --- /dev/null +++ b/content/reference/api/dvp/latest.yaml @@ -0,0 +1,696 @@ +openapi: 3.0.0 +info: + title: DVP Data API + version: 1.0.0 + x-logo: + url: https://docs.docker.com/assets/images/logo-docker-main.png + href: /reference + description: | + The Docker DVP Data API allows [Docker Verified Publishers](https://docs.docker.com/docker-hub/publish/) to view image pull analytics data for their namespaces. Analytics data can be retrieved as raw data, or in a summary format. + + #### Summary data + + In your summary data CSV, you will have access to the data points listed below. You can request summary data for a complete week (Monday through Sunday) or for a complete month (available on the first day of the following month). + + There are two levels of summary data: + + - Repository-level, a summary of every namespace and repository + - Tag- or digest-level, a summary of every namespace, repository, and reference + (tag or digest) + + The summary data formats contain the following data points: + + - Unique IP address count + - Pulls by tag count + - Pulls by digest count + - Version check count + + #### Raw data + + In your raw data CSV you will have access to the data points listed below. You can request raw data for a complete week (Monday through Sunday) or for a complete month (available on the first day of the following month). **Note:** each action is represented as a single row. 
+ + - Type (industry) + - Host (cloud provider) + - Country (geolocation) + - Timestamp + - Namespace + - Repository + - Reference (digest is always included, tag is provided when available) + - HTTP request method + - Action, one of the following: + - Pull by tag + - Pull by digest + - Version check + - User-Agent + +servers: + - url: https://hub.docker.com/api/publisher/analytics/v1 +security: + - HubAuth: [] + +features.openapi: + schemaDefinitionsTagName: Schemas + +tags: + - name: authentication + x-displayName: Authentication Endpoints + - name: namespaces + x-displayName: Namespace data + - name: discovery + x-displayName: Discovery + - name: responseDataFile + x-displayName: ResponseDataFile + description: | + + - name: yearModel + x-displayName: Year Data Model + description: | + + - name: monthModel + x-displayName: Month Data Model + description: | + + - name: weekModel + x-displayName: Week Data Model + description: | + + +x-tagGroups: + - name: API + tags: + - authentication + - discovery + - namespaces + - name: Models + tags: + - responseDataFile + - yearModel + - monthModel + - weekModel + +paths: + /v2/users/login: + security: [] + servers: + - url: https://hub.docker.com + post: + security: [] + tags: + - authentication + summary: Create an authentication token + operationId: PostUsersLogin + description: | + Creates and returns a bearer token in JWT format that you can use to + authenticate with Docker Hub APIs. + + The returned token is used in the HTTP Authorization header like `Authorization: Bearer {TOKEN}`. + + Most Docker Hub APIs require this token either to consume or to get detailed information. For example, to list images in a private repository. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/UsersLoginRequest" + description: Login details. + required: true + responses: + 200: + description: Authentication successful + content: + application/json: + schema: + $ref: "#/components/schemas/PostUsersLoginSuccessResponse" + 401: + description: Authentication failed or second factor required + content: + application/json: + schema: + $ref: "#/components/schemas/PostUsersLoginErrorResponse" + /v2/users/2fa-login: + security: [] + servers: + - url: https://hub.docker.com + post: + security: [] + tags: + - authentication + summary: Second factor authentication + operationId: PostUsers2FALogin + description: | + When a user has 2FA enabled, this is the second call to perform after + `/v2/users/login` call. + + Creates and returns a bearer token in JWT format that you can use to authenticate with Docker Hub APIs. + + The returned token is used in the HTTP Authorization header like `Authorization: Bearer {TOKEN}`. + + Most Docker Hub APIs require this token either to consume or to get detailed information. For example, to list images in a private repository. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/Users2FALoginRequest" + description: Login details. + required: true + responses: + 200: + description: Authentication successful + content: + application/json: + schema: + $ref: "#/components/schemas/PostUsersLoginSuccessResponse" + 401: + description: Authentication failed or second factor required + content: + application/json: + schema: + $ref: "#/components/schemas/PostUsers2FALoginErrorResponse" + + + /: + get: + tags: [discovery] + summary: Get namespaces and repos + description: Gets a list of your namespaces and repos which have data available. 
+ operationId: getNamespaces + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/NamespaceData' + /namespaces: + get: + tags: [discovery] + summary: Get user's namespaces + description: Get metadata associated with the namespaces the user has access to, including extra repos associated with the namespaces. + operationId: getUserNamespaces + responses: + '200': + description: Success + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NamespaceMetadata' + '401': + description: Authentication failed or second factor required + /namespaces/{namespace}: + get: + tags: [discovery] + summary: Get namespace + description: Gets metadata associated with specified namespace, including extra repos associated with the namespace. + operationId: getNamespace + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/NamespaceMetadata' + /namespaces/{namespace}/pulls: + get: + tags: [namespaces] + summary: Get pull data + description: Gets pulls for the given namespace. + operationId: getNamespacePulls + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + - in: query + name: timespan + schema: + $ref: '#/components/schemas/TimespanType' + required: false + description: Timespan type for fetching data + - in: query + name: period + schema: + $ref: '#/components/schemas/PeriodType' + required: false + description: Relative period of the period to fetch data + - in: query + name: group + schema: + $ref: '#/components/schemas/GroupType' + required: false + description: Field to group the data by + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/PullData' + '404': + description: Not found - namespace doesn't exist or user does not have permission to access it + /namespaces/{namespace}/repos/{repo}/pulls: + get: + tags: [namespaces] + summary: Get pull data + description: Gets pulls for the given repo. + operationId: getRepoPulls + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + - in: path + name: repo + schema: + type: string + required: true + description: Repository to fetch data for + - in: query + name: timespan + schema: + $ref: '#/components/schemas/TimespanType' + required: false + description: Timespan type for fetching data + - in: query + name: period + schema: + $ref: '#/components/schemas/PeriodType' + required: false + description: Relative period of the period to fetch data + - in: query + name: group + schema: + $ref: '#/components/schemas/GroupType' + required: false + description: Field to group the data by + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/PullData' + '404': + description: Not found - repo doesn't exist or user does not have permission to access it + /namespaces/{namespace}/pulls/exports/years: + get: + tags: [namespaces] + summary: Get years with data + description: Gets a list of years that have data for the given namespace. 
+ operationId: getNamespaceYears + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/YearData' + /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}: + get: + tags: [namespaces] + summary: Get timespans with data + description: Gets a list of timespans of the given type that have data for the given namespace and year. + operationId: getNamespaceTimespans + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + - in: path + name: year + schema: + type: integer + required: true + description: Year to fetch data for + - in: path + name: timespantype + schema: + $ref: '#/components/schemas/TimespanType' + required: true + description: Type of timespan to fetch data for + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/TimespanData' + /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}/{timespan}: + get: + tags: [namespaces] + summary: Get namespace metadata for timespan + description: Gets info about data for the given namespace and timespan. + operationId: getNamespaceTimespanMetadata + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + - in: path + name: year + schema: + type: integer + required: true + description: Year to fetch data for + - in: path + name: timespantype + schema: + $ref: '#/components/schemas/TimespanType' + required: true + description: Type of timespan to fetch data for + - in: path + name: timespan + schema: + type: integer + required: true + description: Timespan to fetch data for + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/TimespanModel' + '404': + description: Not Found + /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}/{timespan}/{dataview}: + get: + tags: [namespaces] + summary: Get namespace data for timespan + description: Gets a list of URLs that can be used to download the pull data for the given namespace and timespan. + operationId: getNamespaceDataByTimespan + parameters: + - in: path + name: namespace + schema: + type: string + required: true + description: Namespace to fetch data for + - in: path + name: year + schema: + type: integer + required: true + description: Year to fetch data for + - in: path + name: timespantype + schema: + $ref: '#/components/schemas/TimespanType' + required: true + description: Type of timespan to fetch data for + - in: path + name: timespan + schema: + type: integer + required: true + description: Timespan to fetch data for + - in: path + name: dataview + schema: + $ref: '#/components/schemas/DataviewType' + required: true + description: Type of data to fetch + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/ResponseData' + /repos/pulls: + get: + tags: [namespaces] + summary: Get pull data for multiple repos + description: Gets pulls for the given repos. + operationId: getManyReposPulls + parameters: + - in: query + name: repos + schema: + type: array + items: + type: string + required: true + description: Repositories to fetch data for (maximum of 50 repositories per request).
+ - in: query + name: timespan + schema: + $ref: '#/components/schemas/TimespanType' + required: false + description: Timespan type for fetching data + - in: query + name: period + schema: + $ref: '#/components/schemas/PeriodType' + required: false + description: Relative period of the period to fetch data + - in: query + name: group + schema: + $ref: '#/components/schemas/GroupType' + required: false + description: Field to group the data by + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: '#/components/schemas/ReposPullData' + +components: + schemas: + UsersLoginRequest: + description: User login details + type: object + required: + - username + - password + properties: + username: + description: The username of the Docker Hub account to authenticate with. + type: string + example: myusername + password: + description: + The password or personal access token (PAT) of the Docker Hub + account to authenticate with. + type: string + example: hunter2 + PostUsersLoginSuccessResponse: + description: successful user login response + type: object + properties: + token: + description: | + Created authentication token. + + This token can be used in the HTTP Authorization header as a JWT to authenticate with the Docker Hub APIs. + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c + nullable: false + PostUsersLoginErrorResponse: + description: failed user login response or second factor required + type: object + required: + - detail + properties: + detail: + description: Description of the error. + type: string + example: Incorrect authentication credentials + nullable: false + login_2fa_token: + description: + Short-lived token to be used on `/v2/users/2fa-login` to + complete the authentication. This field is present only if 2FA is + enabled. + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c + nullable: true + Users2FALoginRequest: + description: Second factor user login details + type: object + required: + - login_2fa_token + - code + properties: + login_2fa_token: + description: The intermediate 2FA token returned from `/v2/users/login` API. + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c + code: + description: + The Time-based One-Time Password of the Docker Hub account to + authenticate with. + type: string + example: 123456 + PostUsers2FALoginErrorResponse: + description: failed second factor login response. + type: object + properties: + detail: + description: Description of the error. + type: string + example: Incorrect authentication credentials + nullable: false + + ResponseData: + properties: + data: + type: array + description: | + List of urls to download the data. When the data is large, the data will be split into multiple files. 
+ items: + $ref: '#/components/schemas/ResponseDataFile' + ResponseDataFile: + properties: + url: + type: string + size: + type: integer + format: int64 + NamespaceData: + properties: + namespaces: + type: array + items: + type: string + NamespaceMetadata: + properties: + namespace: + type: string + extraRepos: + type: array + items: + type: string + datasets: + type: array + items: + $ref: '#/components/schemas/DatasetModel' + DatasetModel: + properties: + name: + $ref: '#/components/schemas/DatasetType' + views: + type: array + items: + $ref: '#/components/schemas/DataviewType' + timespans: + type: array + items: + $ref: '#/components/schemas/TimespanType' + PullData: + properties: + pulls: + type: array + items: + $ref: '#/components/schemas/PullModel' + ReposPullData: + properties: + repos: + type: object + additionalProperties: + $ref: '#/components/schemas/PullData' + PullModel: + properties: + start: + type: string + end: + type: string + repo: + type: string + namespace: + type: string + pullCount: + type: integer + ipCount: + type: integer + country: + type: string + + YearData: + properties: + years: + type: array + items: + $ref: '#/components/schemas/YearModel' + YearModel: + properties: + year: + type: integer + MonthData: + properties: + months: + type: array + items: + $ref: '#/components/schemas/MonthModel' + MonthModel: + properties: + month: + type: integer + WeekData: + properties: + weeks: + type: array + items: + $ref: '#/components/schemas/WeekModel' + WeekModel: + properties: + week: + type: integer + TimespanType: + type: string + enum: [months,weeks] + PeriodType: + type: string + enum: [last-2-months,last-3-months,last-6-months,last-12-months] + DataviewType: + type: string + enum: [raw,summary,repo-summary,namespace-summary] + DatasetType: + type: string + enum: [pulls] + TimespanModel: + oneOf: + - $ref: '#/components/schemas/MonthModel' + - $ref: '#/components/schemas/WeekModel' + TimespanData: + oneOf: + - $ref: '#/components/schemas/MonthData' + - $ref: '#/components/schemas/WeekData' + GroupType: + type: string + enum: [repo,namespace] + securitySchemes: + HubAuth: + type: http + scheme: bearer + bearerFormat: JWT + description: | + JWT Bearer Authentication is required to access the Docker DVP Data API. + + This authentication documentation is duplicated from the [Hub API Authentication docs](https://docs.docker.com/reference/api/hub/#tag/authentication) + x-displayName: Docker Hub Authentication diff --git a/content/reference/api/engine/_index.md b/content/reference/api/engine/_index.md index 2511ce1ba3c3..82ffdbb302f2 100644 --- a/content/reference/api/engine/_index.md +++ b/content/reference/api/engine/_index.md @@ -73,23 +73,22 @@ To see the highest version of the API your Docker daemon and client support, use ```console $ docker version Client: Docker Engine - Community - Version: 27.4.0 - API version: 1.47 - Go version: go1.22.10 - Git commit: bde2b89 - Built: Sat Dec 7 10:38:33 2024 + Version: 28.3.2 + API version: 1.51 + Go version: go1.24.5 + Git commit: 578ccf6 + Built: Wed Jul 9 16:13:45 2025 OS/Arch: linux/amd64 Context: default Server: Docker Engine - Community Engine: - Version: 27.4.0 - API version: 1.47 (minimum version 1.24) - Go version: go1.22.10 - Git commit: 92a8393 - Built: Sat Dec 7 10:38:33 2024 + Version: 28.3.2 + API version: 1.51 (minimum version 1.24) + Go version: go1.24.5 + Git commit: e77ff99 + Built: Wed Jul 9 16:13:45 2025 OS/Arch: linux/amd64 - Experimental: false ...
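# Illustrative addition, not part of the captured output above: if you need to
# pin the client to a specific API version, you can set the DOCKER_API_VERSION
# environment variable (one of the override methods described below).
$ DOCKER_API_VERSION=1.48 docker version --format '{{.Client.APIVersion}}'
1.48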
``` @@ -131,8 +130,13 @@ You can specify the API version to use in any of the following ways: ### API version matrix -| Docker version | Maximum API version | Change log | -|:---------------|:---------------------------|:-----------------------------------------------------------------------------| +| Docker version | Maximum API version | Change log | +|:---------------|:---------------------------------------------|:-------------------------------------------------------------------| +| 28.3 | [1.51](/reference/api/engine/version/v1.51/) | [changes](/reference/api/engine/version-history/#v151-api-changes) | +| 28.2 | [1.50](/reference/api/engine/version/v1.50/) | [changes](/reference/api/engine/version-history/#v150-api-changes) | +| 28.1 | [1.49](/reference/api/engine/version/v1.49/) | [changes](/reference/api/engine/version-history/#v149-api-changes) | +| 28.0 | [1.48](/reference/api/engine/version/v1.48/) | [changes](/reference/api/engine/version-history/#v148-api-changes) | +| 27.5 | [1.47](/reference/api/engine/version/v1.47/) | [changes](/reference/api/engine/version-history/#v147-api-changes) | | 27.4 | [1.47](/reference/api/engine/version/v1.47/) | [changes](/reference/api/engine/version-history/#v147-api-changes) | | 27.3 | [1.47](/reference/api/engine/version/v1.47/) | [changes](/reference/api/engine/version-history/#v147-api-changes) | | 27.2 | [1.47](/reference/api/engine/version/v1.47/) | [changes](/reference/api/engine/version-history/#v147-api-changes) | diff --git a/content/reference/api/engine/version/v1.47.md b/content/reference/api/engine/version/v1.47.md index 96c2f6c8af15..4742bfed2d5e 100644 --- a/content/reference/api/engine/version/v1.47.md +++ b/content/reference/api/engine/version/v1.47.md @@ -3,6 +3,4 @@ linkTitle: v1.47 title: Docker Engine API v1.47 reference aliases: - /engine/api/v1.47/ - - /engine/api/latest/ - - /reference/api/engine/latest/ --- diff --git a/content/reference/api/engine/version/v1.48.md b/content/reference/api/engine/version/v1.48.md new file mode 100644 index 000000000000..88e36b559748 --- /dev/null +++ b/content/reference/api/engine/version/v1.48.md @@ -0,0 +1,6 @@ +--- +linkTitle: v1.48 +title: Docker Engine API v1.48 reference +aliases: + - /engine/api/v1.48/ +--- diff --git a/content/reference/api/engine/version/v1.49.md b/content/reference/api/engine/version/v1.49.md new file mode 100644 index 000000000000..ba8a5007cf5c --- /dev/null +++ b/content/reference/api/engine/version/v1.49.md @@ -0,0 +1,6 @@ +--- +linkTitle: v1.49 +title: Docker Engine API v1.49 reference +aliases: + - /engine/api/v1.49/ +--- diff --git a/content/reference/api/engine/version/v1.50.md b/content/reference/api/engine/version/v1.50.md new file mode 100644 index 000000000000..eb6246ccb9b5 --- /dev/null +++ b/content/reference/api/engine/version/v1.50.md @@ -0,0 +1,6 @@ +--- +linkTitle: v1.50 +title: Docker Engine API v1.50 reference +aliases: + - /engine/api/v1.50/ +--- diff --git a/content/reference/api/engine/version/v1.51.md b/content/reference/api/engine/version/v1.51.md new file mode 100644 index 000000000000..0979800382c0 --- /dev/null +++ b/content/reference/api/engine/version/v1.51.md @@ -0,0 +1,8 @@ +--- +linkTitle: v1.51 +title: Docker Engine API v1.51 reference +aliases: + - /engine/api/v1.51/ + - /engine/api/latest/ + - /reference/api/engine/latest/ +--- diff --git a/content/reference/api/extensions-sdk/DockerDesktopClient.md b/content/reference/api/extensions-sdk/DockerDesktopClient.md index 9ab538f8f2f8..40966eb00fc4 100644 --- 
a/content/reference/api/extensions-sdk/DockerDesktopClient.md +++ b/content/reference/api/extensions-sdk/DockerDesktopClient.md @@ -401,30 +401,6 @@ DockerDesktopClientV0.navigateToVolume ___ -### navigateToDevEnvironments - -▸ **navigateToDevEnvironments**(): `void` - -Navigate to the Dev Environments window in Docker Desktop. - -```typescript -window.ddClient.navigateToDevEnvironments(); -``` - -> [!WARNING] -> -> It will be removed in a future version. Use [viewDevEnvironments](NavigationIntents.md#viewdevenvironments) instead. - -#### Returns - -`void` - -#### Inherited from - -DockerDesktopClientV0.navigateToDevEnvironments - -___ - ## Other Methods ### execHostCmd diff --git a/content/reference/api/extensions-sdk/NavigationIntents.md b/content/reference/api/extensions-sdk/NavigationIntents.md index 6868a8c8d9dc..743b78462110 100644 --- a/content/reference/api/extensions-sdk/NavigationIntents.md +++ b/content/reference/api/extensions-sdk/NavigationIntents.md @@ -197,24 +197,6 @@ A promise that fails if the image doesn't exist. ___ -## Other Methods - -### viewDevEnvironments - -▸ **viewDevEnvironments**(): `Promise`<`void`\> - -Navigate to the Dev Environments window in Docker Desktop. - -```typescript -ddClient.desktopUI.navigate.viewDevEnvironments() -``` - -#### Returns - -`Promise`<`void`\> - -___ - ## Volume Methods ### viewVolumes diff --git a/content/reference/api/hub/changelog.md b/content/reference/api/hub/changelog.md new file mode 100644 index 000000000000..77275450bd0c --- /dev/null +++ b/content/reference/api/hub/changelog.md @@ -0,0 +1,43 @@ +--- +description: Docker Hub API changelog +title: Docker Hub API changelog +linkTitle: Changelog +keywords: docker hub, hub, whats new, release notes, api, changelog +weight: 2 +toc_min: 1 +toc_max: 2 +aliases: + - /reference/api/hub/latest-changelog/ +--- + +Here you can learn about the latest changes, new features, bug fixes, and known +issues for Docker Service APIs. + +--- + +## 2025-06-27 + +### New + +- Add [List repositories](/reference/api/hub/latest/#tag/repositories/operation/listNamespaceRepositories) endpoints for a given `namespace`. + +### Deprecations + +- [Deprecate /v2/repositories/{namespace}](/reference/api/hub/deprecated/#deprecate-legacy-listnamespacerepositories) + +--- + +## 2025-03-25 + +### New + +- Add [APIs](/reference/api/hub/latest/#tag/org-access-tokens) for organization access token (OATs) management. + +--- + +## 2025-03-18 + +### New + +- Add access to [audit logs](/reference/api/hub/latest/#tag/audit-logs) for org + access tokens. diff --git a/content/reference/api/hub/deprecated.md b/content/reference/api/hub/deprecated.md index 97adcf67ae08..fc7d1ec78546 100644 --- a/content/reference/api/hub/deprecated.md +++ b/content/reference/api/hub/deprecated.md @@ -1,14 +1,54 @@ --- -description: Docker Hub API v1 (deprecated) -keywords: kitematic, deprecated -title: Docker Hub API v1 (deprecated) +description: Deprecated Docker Hub API endpoints +keywords: deprecated +title: Deprecated Docker Hub API endpoints +linkTitle: Deprecated +weight: 3 aliases: - - /docker-hub/api/deprecated/ + - /docker-hub/api/deprecated/ --- -> **Deprecated** -> -> Docker Hub API v1 has been deprecated. Please use Docker Hub API v2 instead. +This page provides an overview of endpoints that are deprecated in Docker Hub API. + +## Endpoint deprecation policy + +As changes are made to Docker there may be times when existing endpoints need to be removed or replaced with newer endpoints. 
Before an existing endpoint is removed it is labeled as "deprecated" within the documentation. After some time it may be removed. + +## Deprecated endpoints + +The following table provides an overview of the current status of deprecated endpoints: + +**Deprecated**: the endpoint is marked "deprecated" and should no longer be used. + +The endpoint may be removed, disabled, or change behavior in a future release. + +**Removed**: the endpoint was removed, disabled, or hidden. + +--- + +| Status | Feature | Date | +|------------|---------------------------------------------------------------------------------------|------------| +| Deprecated | [Deprecate /v2/repositories/{namespace}](#deprecate-legacy-listnamespacerepositories) | 2025-06-27 | +| | [Create deprecation log table](#create-deprecation-log-table) | 2025-06-27 | +| Removed | [Docker Hub API v1 deprecation](#docker-hub-api-v1-deprecation) | 2022-08-23 | + +--- + +### Deprecate legacy ListNamespaceRepositories + +Deprecate undocumented endpoint `GET /v2/repositories/{namespace}` replaced by [List repositories](/reference/api/hub/latest/#tag/repositories/operation/listNamespaceRepositories). + +--- + +### Create deprecation log table + +Reformat page + +--- + +### Docker Hub API v1 deprecation + +Docker Hub API v1 has been deprecated. Use Docker Hub API v2 instead. The following API routes within the v1 path will no longer work and will return a 410 status code: * `/v1/repositories/{name}/images` @@ -20,11 +60,11 @@ The following API routes within the v1 path will no longer work and will return If you want to continue using the Docker Hub API in your current applications, update your clients to use v2 endpoints. -| **OLD** | **NEW** | -|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [/v1/repositories/{name}/images](https://github.com/moby/moby/blob/v1.3.0/docs/sources/reference/api/docker-io_api.md#list-user-repository-images) | [/v2/namespaces/{namespace}/repositories/{repository}/images](/reference/api/hub/latest.md#tag/images/operation/GetNamespacesRepositoriesImages) | -| [/v1/repositories/{namespace}/{name}/images](https://github.com/moby/moby/blob/v1.3.0/docs/sources/reference/api/docker-io_api.md#list-user-repository-images) | [/v2/namespaces/{namespace}/repositories/{repository}/images](/reference/api/hub/latest.md#tag/images/operation/GetNamespacesRepositoriesImages) | -| [/v1/repositories/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#list-repository-tags) | [/v2/namespaces/{namespace}/repositories/{repository}/tags](/reference/api/hub/latest.md#tag/repositories/paths/~1v2~1namespaces~1%7Bnamespace%7D~1repositories~1%7Brepository%7D~1tags/get) | -| [/v1/repositories/{namespace}/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#list-repository-tags) | [/v2/namespaces/{namespace}/repositories/{repository}/tags](/reference/api/hub/latest.md#tag/repositories/paths/~1v2~1namespaces~1%7Bnamespace%7D~1repositories~1%7Brepository%7D~1tags/get) | -| [/v1/repositories/{namespace}/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#get-image-id-for-a-particular-tag) | 
[/v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}](/reference/api/hub/latest.md#tag/repositories/paths/~1v2~1namespaces~1%7Bnamespace%7D~1repositories~1%7Brepository%7D~1tags~1%7Btag%7D/get) | -| [/v1/repositories/{namespace}/{name}/tags/{tag_name}](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#get-image-id-for-a-particular-tag) | [/v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}](/reference/api/hub/latest.md#tag/repositories/paths/~1v2~1namespaces~1%7Bnamespace%7D~1repositories~1%7Brepository%7D~1tags~1%7Btag%7D/get) | +| **OLD** | **NEW** | +|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------| +| [/v1/repositories/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#list-repository-tags) | [/v2/namespaces/{namespace}/repositories/{repository}/tags](/reference/api/hub/latest/#tag/repositories/operation/ListRepositoryTags) | +| [/v1/repositories/{namespace}/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#list-repository-tags) | [/v2/namespaces/{namespace}/repositories/{repository}/tags](/reference/api/hub/latest.md/#tag/repositories/operation/ListRepositoryTags) | +| [/v1/repositories/{namespace}/{name}/tags](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#get-image-id-for-a-particular-tag) | [/v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}](/reference/api/hub/latest/#tag/repositories/operation/GetRepositoryTag) | +| [/v1/repositories/{namespace}/{name}/tags/{tag_name}](https://github.com/moby/moby/blob/v1.8.3/docs/reference/api/registry_api.md#get-image-id-for-a-particular-tag) | [/v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}](/reference/api/hub/latest/#tag/repositories/operation/GetRepositoryTag) | + +--- \ No newline at end of file diff --git a/content/reference/api/hub/dvp.yaml b/content/reference/api/hub/dvp.yaml index 8ff2030acab3..3a561a7be5c9 100644 --- a/content/reference/api/hub/dvp.yaml +++ b/content/reference/api/hub/dvp.yaml @@ -1,696 +1,5 @@ -openapi: 3.0.0 -info: - title: DVP Data API - version: 1.0.0 - x-logo: - url: https://docs.docker.com/assets/images/logo-docker-main.png - href: /reference - description: | - The Docker DVP Data API allows [Docker Verified Publishers](https://docs.docker.com/docker-hub/publish/) to view image pull analytics data for their namespaces. Analytics data can be retrieved as raw data, or in a summary format. - - #### Summary data - - In your summary data CSV, you will have access to the data points listed below. You can request summary data for a complete week (Monday through Sunday) or for a complete month (available on the first day of the following month). - - There are two levels of summary data: - - - Repository-level, a summary of every namespace and repository - - Tag- or digest-level, a summary of every namespace, repository, and reference - (tag or digest) - - The summary data formats contain the following data points: - - - Unique IP address count - - Pulls by tag count - - Pulls by digest count - - Version check count - - #### Raw data - - In your raw data CSV you will have access to the data points listed below. 
You can request raw data for a complete week (Monday through Sunday) or for a complete month (available on the first day of the following month). **Note:** each action is represented as a single row. - - - Type (industry) - - Host (cloud provider) - - Country (geolocation) - - Timestamp - - Namespace - - Repository - - Reference (digest is always included, tag is provided when available) - - HTTP request method - - Action, one of the following: - - Pull by tag - - Pull by digest - - Version check - - User-Agent - -servers: - - url: https://hub.docker.com/api/publisher/analytics/v1 -security: - - HubAuth: [] - -features.openapi: - schemaDefinitionsTagName: Schemas - -tags: - - name: authentication - x-displayName: Authentication Endpoints - - name: namespaces - x-displayName: Namespace data - - name: discovery - x-displayName: Discovery - - name: responseDataFile - x-displayName: ResponseDataFile - description: | - - - name: yearModel - x-displayName: Year Data Model - description: | - - - name: monthModel - x-displayName: Month Data Model - description: | - - - name: weekModel - x-displayName: Week Data Model - description: | - - -x-tagGroups: - - name: API - tags: - - authentication - - discovery - - namespaces - - name: Models - tags: - - responseDataFile - - yearModel - - monthModel - - weekModel - -paths: - /v2/users/login: - security: [] - servers: - - url: https://hub.docker.com - post: - security: [] - tags: - - authentication - summary: Create an authentication token - operationId: PostUsersLogin - description: | - Creates and returns a bearer token in JWT format that you can use to - authenticate with Docker Hub APIs. - - The returned token is used in the HTTP Authorization header like `Authorization: Bearer {TOKEN}`. - - Most Docker Hub APIs require this token either to consume or to get detailed information. For example, to list images in a private repository. - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/UsersLoginRequest" - description: Login details. - required: true - responses: - 200: - description: Authentication successful - content: - application/json: - schema: - $ref: "#/components/schemas/PostUsersLoginSuccessResponse" - 401: - description: Authentication failed or second factor required - content: - application/json: - schema: - $ref: "#/components/schemas/PostUsersLoginErrorResponse" - /v2/users/2fa-login: - security: [] - servers: - - url: https://hub.docker.com - post: - security: [] - tags: - - authentication - summary: Second factor authentication - operationId: PostUsers2FALogin - description: | - When a user has 2FA enabled, this is the second call to perform after - `/v2/users/login` call. - - Creates and returns a bearer token in JWT format that you can use to authenticate with Docker Hub APIs. - - The returned token is used in the HTTP Authorization header like `Authorization: Bearer {TOKEN}`. - - Most Docker Hub APIs require this token either to consume or to get detailed information. For example, to list images in a private repository. - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/Users2FALoginRequest" - description: Login details. 
- required: true - responses: - 200: - description: Authentication successful - content: - application/json: - schema: - $ref: "#/components/schemas/PostUsersLoginSuccessResponse" - 401: - description: Authentication failed or second factor required - content: - application/json: - schema: - $ref: "#/components/schemas/PostUsers2FALoginErrorResponse" - - - /: - get: - tags: [discovery] - summary: Get namespaces and repos - description: Gets a list of your namespaces and repos which have data available. - operationId: getNamespaces - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/NamespaceData' - /namespaces: - get: - tags: [discovery] - summary: Get user's namespaces - description: Get metadata associated with the namespaces the user has access to, including extra repos associated with the namespaces. - operationId: getUserNamespaces - responses: - '200': - description: Success - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/NamespaceMetadata' - '401': - description: Authentication failed or second factor required - /namespaces/{namespace}: - get: - tags: [discovery] - summary: Get namespace - description: Gets metadata associated with specified namespace, including extra repos associated with the namespace. - operationId: getNamespace - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/NamespaceMetadata' - /namespaces/{namespace}/pulls: - get: - tags: [namespaces] - summary: Get pull data - description: Gets pulls for the given namespace. - operationId: getNamespacePulls - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - - in: query - name: timespan - schema: - $ref: '#/components/schemas/TimespanType' - required: false - description: Timespan type for fetching data - - in: query - name: period - schema: - $ref: '#/components/schemas/PeriodType' - required: false - description: Relative period of the period to fetch data - - in: query - name: group - schema: - $ref: '#/components/schemas/GroupType' - required: false - description: Field to group the data by - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/PullData' - '404': - description: Not found - namespace doesn't exist or user does not have permission to access it - /namespaces/{namespace}/repos/{repo}/pulls: - get: - tags: [namespaces] - summary: Get pull data - description: Gets pulls for the given repo. 
- operationId: getRepoPulls - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - - in: path - name: repo - schema: - type: string - required: true - description: Repository to fetch data for - - in: query - name: timespan - schema: - $ref: '#/components/schemas/TimespanType' - required: false - description: Timespan type for fetching data - - in: query - name: period - schema: - $ref: '#/components/schemas/PeriodType' - required: false - description: Relative period of the period to fetch data - - in: query - name: group - schema: - $ref: '#/components/schemas/GroupType' - required: false - description: Field to group the data by - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/PullData' - '404': - description: Not found - repo doesn't exist or user does not have permission to access it - /namespaces/{namespace}/pulls/exports/years: - get: - tags: [namespaces] - summary: Get years with data - description: Gets a list of years that have data for the given namespace. - operationId: getNamespaceYears - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/YearData' - /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}: - get: - tags: [namespaces] - summary: Get timespans with data - description: Gets a list of timespans of the given type that have data for the given namespace and year. - operationId: getNamespaceTimespans - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - - in: path - name: year - schema: - type: integer - required: true - description: Year to fetch data for - - in: path - name: timespantype - schema: - $ref: '#/components/schemas/TimespanType' - required: true - description: Type of timespan to fetch data for - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/TimespanData' - /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}/{timespan}: - get: - tags: [namespaces] - summary: Get namespace metadata for timespan - description: Gets info about data for the given namespace and timespan. - operationId: getNamespaceTimespanMetadata - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - - in: path - name: year - schema: - type: integer - required: true - description: Year to fetch data for - - in: path - name: timespantype - schema: - $ref: '#/components/schemas/TimespanType' - required: true - description: Type of timespan to fetch data for - - in: path - name: timespan - schema: - type: integer - required: true - description: Timespan to fetch data for - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/TimespanModel' - '404': - description: Not Found - /namespaces/{namespace}/pulls/exports/years/{year}/{timespantype}/{timespan}/{dataview}: - get: - tags: [namespaces] - summary: Get namespace data for timespan - description: Gets a list of URLs that can be used to download the pull data for the given namespace and timespan. 
- operationId: getNamespaceDataByTimespan - parameters: - - in: path - name: namespace - schema: - type: string - required: true - description: Namespace to fetch data for - - in: path - name: year - schema: - type: integer - required: true - description: Year to fetch data for - - in: path - name: timespantype - schema: - $ref: '#/components/schemas/TimespanType' - required: true - description: Type of timespan to fetch data for - - in: path - name: timespan - schema: - type: integer - required: true - description: Timespan to fetch data for - - in: path - name: dataview - schema: - $ref: '#/components/schemas/DataviewType' - required: true - description: Type of data to fetch - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/ResponseData' - /repos/pulls: - get: - tags: [namespaces] - summary: Get pull data for multiple repos - description: Gets pull for the given repos. - operationId: getManyReposPulls - parameters: - - in: query - name: repos - schema: - type: array - items: - type: string - required: true - description: Repositories to fetch data for (maximum of 50 repositories per request). - - in: query - name: timespan - schema: - $ref: '#/components/schemas/TimespanType' - required: false - description: Timespan type for fetching data - - in: query - name: period - schema: - $ref: '#/components/schemas/PeriodType' - required: false - description: Relative period of the period to fetch data - - in: query - name: group - schema: - $ref: '#/components/schemas/GroupType' - required: false - description: Field to group the data by - responses: - '200': - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/ReposPullData' - -components: - schemas: - UsersLoginRequest: - description: User login details - type: object - required: - - username - - password - properties: - username: - description: The username of the Docker Hub account to authenticate with. - type: string - example: myusername - password: - description: - The password or personal access token (PAT) of the Docker Hub - account to authenticate with. - type: string - example: hunter2 - PostUsersLoginSuccessResponse: - description: successful user login response - type: object - properties: - token: - description: | - Created authentication token. - - This token can be used in the HTTP Authorization header as a JWT to authenticate with the Docker Hub APIs. - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c - nullable: false - PostUsersLoginErrorResponse: - description: failed user login response or second factor required - type: object - required: - - detail - properties: - detail: - description: Description of the error. - type: string - example: Incorrect authentication credentials - nullable: false - login_2fa_token: - description: - Short-lived token to be used on `/v2/users/2fa-login` to - complete the authentication. This field is present only if 2FA is - enabled. 
- type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c - nullable: true - Users2FALoginRequest: - description: Second factor user login details - type: object - required: - - login_2fa_token - - code - properties: - login_2fa_token: - description: The intermediate 2FA token returned from `/v2/users/login` API. - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c - code: - description: - The Time-based One-Time Password of the Docker Hub account to - authenticate with. - type: string - example: 123456 - PostUsers2FALoginErrorResponse: - description: failed second factor login response. - type: object - properties: - detail: - description: Description of the error. - type: string - example: Incorrect authentication credentials - nullable: false - - ResponseData: - properties: - data: - type: array - description: | - List of urls to download the data. When the data is large, the data will be split into multiple files. - items: - $ref: '#/components/schemas/ResponseDataFile' - ResponseDataFile: - properties: - url: - type: string - size: - type: integer - format: int64 - NamespaceData: - properties: - namespaces: - type: array - items: - type: string - NamespaceMetadata: - properties: - namespace: - type: string - extraRepos: - type: array - items: - type: string - datasets: - type: array - items: - $ref: '#/components/schemas/DatasetModel' - DatasetModel: - properties: - name: - $ref: '#/components/schemas/DatasetType' - views: - type: array - items: - $ref: '#/components/schemas/DataviewType' - timespans: - type: array - items: - $ref: '#/components/schemas/TimespanType' - PullData: - properties: - pulls: - type: array - items: - $ref: '#/components/schemas/PullModel' - ReposPullData: - properties: - repos: - type: object - additionalProperties: - $ref: '#/components/schemas/PullData' - PullModel: - properties: - start: - type: string - end: - type: string - repo: - type: string - namespace: - type: string - pullCount: - type: integer - ipCount: - type: integer - country: - type: string - - YearData: - properties: - years: - type: array - items: - $ref: '#/components/schemas/YearModel' - YearModel: - properties: - year: - type: integer - MonthData: - properties: - months: - type: array - items: - $ref: '#/components/schemas/MonthModel' - MonthModel: - properties: - month: - type: integer - WeekData: - properties: - weeks: - type: array - items: - $ref: '#/components/schemas/WeekModel' - WeekModel: - properties: - week: - type: integer - TimespanType: - type: string - enum: [months,weeks] - PeriodType: - type: string - enum: [last-2-months,last-3-months,last-6-months,last-12-months] - DataviewType: - type: string - enum: [raw,summary,repo-summary,namespace-summary] - DatasetType: - type: string - enum: [pulls] - TimespanModel: - oneOf: - - $ref: '#/components/schemas/MonthModel' - - $ref: '#/components/schemas/WeekModel' - TimespanData: - oneOf: - - $ref: '#/components/schemas/MonthData' - - $ref: '#/components/schemas/WeekData' - GroupType: - type: string - enum: [repo,namespace] - securitySchemes: - HubAuth: - type: https - scheme: bearer - bearerFormat: JWT - description: | - JWT Bearer Authentication is required to access the Docker DVP Data API. 
- - This authentication documentation is duplicated from the [Hub API Authentication docs](https://docs.docker.com/reference/api/hub/#tag/authentication) - x-displayName: Docker Hub Authentication +--- +outputs: + - redirect +url: /reference/api/dvp/latest.yaml +--- diff --git a/content/reference/api/hub/latest.md b/content/reference/api/hub/latest.md index a9cd8fcefff5..15fd57a3db7f 100644 --- a/content/reference/api/hub/latest.md +++ b/content/reference/api/hub/latest.md @@ -2,5 +2,6 @@ layout: api description: Reference documentation and Swagger (OpenAPI) specification for the Docker Hub API. title: Docker Hub API reference -linkTitle: Docker Hub API +linkTitle: Latest +weight: 1 --- diff --git a/content/reference/api/hub/latest.yaml b/content/reference/api/hub/latest.yaml index 86d2f3b30dc7..34332990fc7c 100644 --- a/content/reference/api/hub/latest.yaml +++ b/content/reference/api/hub/latest.yaml @@ -1,7 +1,9 @@ -openapi: 3.0.0 +# yaml-language-server: $schema=https://raw.githubusercontent.com/OAI/OpenAPI-Specification/refs/heads/main/schemas/v3.0/schema.yaml + +openapi: 3.0.3 info: title: Docker HUB API - version: beta + version: 2-beta x-logo: url: https://docs.docker.com/assets/images/logo-docker-main.png href: /reference @@ -18,6 +20,10 @@ servers: x-audience: public url: https://hub.docker.com tags: + - name: changelog + x-displayName: Changelog + description: | + See the [Changelog](/reference/api/hub/changelog) for a summary of changes across Docker Hub API versions. - name: resources x-displayName: Resources description: | @@ -34,9 +40,9 @@ tags: - `X-RateLimit-Remaining` - The remaining amount of calls within the limit period. - `X-RateLimit-Reset` - The unix timestamp of when the remaining resets. - If you have hit the limit, you will receive a response status of `429` and the `X-Retry-After` header in the response. + If you have hit the limit, you will receive a response status of `429` and the `Retry-After` header in the response. - The `X-Retry-After` header is a unix timestamp of when you can call the API again. + The [`Retry-After` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Retry-After) specifies the number of seconds to wait until you can call the API again. **Note**: These rate limits are separate from anti-abuse and Docker Hub download, or pull rate limiting. To learn more about Docker Hub pull rate limiting, see [Usage and limits](https://docs.docker.com/docker-hub/usage/). @@ -45,13 +51,57 @@ tags: description: | Most Docker Hub API endpoints require you to authenticate using your Docker credentials before using them. - Additionally, similar to the Docker Hub UI features, API endpoint responses may vary depending on your plan (Personal, Pro, or Team) and your account's permissions. + Additionally, similar to the Docker Hub UI features, API endpoint responses may vary depending on your subscription (Personal, Pro, or Team) and your account's permissions. + + To learn more about the features available in each subscription and to upgrade your existing subscription, see [Docker Pricing](https://www.docker.com/pricing). + + # Types + + The Docker Hub API supports the following authentication types. + + You must use each authentication type with the [Create access token](#tag/authentication-api/operation/AuthCreateAccessToken) route to obtain a bearer token. + + ## Password + Using a username and password is the most powerful, yet least secure way + to authenticate with Docker as a user. 
It allows access to resources + for the user without scopes. + + _In general, it is recommended to use a personal access token (PAT) instead._ + + _**The password authentication type is not available if your organization has SSO enforced.**_ + + ## Personal Access Token (PAT) + Using a username and PAT is the most secure way to authenticate with + Docker as a user. PATs are scoped to specific resources and scopes. + + Currently, a PAT is a more secure password due to limited functionality. + In the future, we may add fine-grained access like organization + access tokens for enhanced usage and security. - To learn more about the features available in each plan and to upgrade your existing plan, see [Docker Pricing](https://www.docker.com/pricing). + ## Organization Access Token (OAT) + Organization access tokens are scoped to specific resources and scopes + in an organization. They are managed by organization owners. + + These tokens are meant for automation and are not meant to be used by + users. + + # Labels + + These labels will show up on routes in this reference that allow for use of bearer + tokens issued from them. + + + + - name: authentication-api + x-displayName: Authentication + description: | + The authentication endpoints allow you to authenticate with Docker Hub APIs. + + For more information, see [Authentication](#tag/authentication). - name: access-tokens x-displayName: Personal Access Tokens description: | - The Personal Access Token endpoints lets you manage personal access tokens. For more information, see [Access Tokens](https://docs.docker.com/security/for-developers/access-tokens/). + The Personal Access Token endpoints lets you manage personal access tokens. For more information, see [Access Tokens](https://docs.docker.com/security/access-tokens/). You can use a personal access token instead of a password in the [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) or in the [Create an authentication token](#operation/PostUsersLogin) route to obtain a bearer token. @@ -67,7 +117,7 @@ tags: description: | The Audit Logs API endpoints allow you to query audit log events across a namespace. - For more information, see [Audit Log](https://docs.docker.com/admin/organization/activity-logs/). + For more information, see [Audit Logs](https://docs.docker.com/admin/organization/activity-logs/). - name: org-settings x-displayName: Org Settings description: | @@ -104,47 +154,57 @@ tags: SCIM is a provisioning system that lets you manage users within your identity provider (IdP). For more information, see [System for Cross-domain Identity management](https://docs.docker.com/security/for-admins/provisioning/scim/). + - name: org-access-tokens + x-displayName: Organization Access Tokens + x-audience: public + description: | + The organization access token endpoints allow you to manage organization access tokens (OATs). See [Organization access tokens](https://docs.docker.com/security/for-admins/access-tokens/) for more information. paths: /v2/users/login: post: tags: - - authentication + - authentication-api summary: Create an authentication token operationId: PostUsersLogin + security: [] + deprecated: true description: | Creates and returns a bearer token in JWT format that you can use to authenticate with Docker Hub APIs. The returned token is used in the HTTP Authorization header like `Authorization: Bearer {TOKEN}`. - Most Docker Hub APIs require this token either to consume or to get detailed information. For example, to list images in a private repository. 
+ _**As of September 16, 2024, this route requires a personal access token (PAT) instead of a password if your organization has SSO enforced.**_ - _**As of September 16, 2024, this route requires a PAT instead of a password if your organization has SSO enforced.**_ +
+ Deprecated: Use [Create access token](#tag/authentication-api/operation/AuthCreateAccessToken) instead. +
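A minimal sketch of the replacement flow may help here: it exchanges Docker credentials for a bearer token on the `/v2/auth/token` route documented below. The `access_token` response field name is an assumption, inferred from the `Authorization: Bearer {access_token}` example in that route's description.

```python
# Sketch: create a short-lived bearer token via POST /v2/auth/token,
# the documented replacement for this deprecated login route.
import requests

resp = requests.post(
    "https://hub.docker.com/v2/auth/token",
    json={
        "identifier": "myusername",            # username, or org name for an OAT
        "secret": "dckr_pat_124509ugsdjga93",  # password, PAT, or OAT
    },
)
resp.raise_for_status()
token = resp.json()["access_token"]  # assumed field name (see note above)

# The token goes in the Authorization header of later calls:
headers = {"Authorization": f"Bearer {token}"}
```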
requestBody: content: application/json: schema: - $ref: '#/components/schemas/UsersLoginRequest' + $ref: "#/components/schemas/UsersLoginRequest" description: Login details. required: true responses: - '200': + "200": description: Authentication successful content: application/json: schema: - $ref: '#/components/schemas/PostUsersLoginSuccessResponse' - '401': + $ref: "#/components/schemas/PostUsersLoginSuccessResponse" + "401": description: Authentication failed or second factor required content: application/json: schema: - $ref: '#/components/schemas/PostUsersLoginErrorResponse' + $ref: "#/components/schemas/PostUsersLoginErrorResponse" /v2/users/2fa-login: post: tags: - - authentication + - authentication-api summary: Second factor authentication operationId: PostUsers2FALogin + security: [] description: | When a user has two-factor authentication (2FA) enabled, this is the second call to perform after `/v2/users/login` call. @@ -157,50 +217,100 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Users2FALoginRequest' + $ref: "#/components/schemas/Users2FALoginRequest" description: Login details. required: true responses: - '200': + "200": description: Authentication successful content: application/json: schema: - $ref: '#/components/schemas/PostUsersLoginSuccessResponse' - '401': - description: Authentication failed or second factor required + $ref: "#/components/schemas/PostUsersLoginSuccessResponse" + "401": + description: Authentication failed content: application/json: schema: - $ref: '#/components/schemas/PostUsers2FALoginErrorResponse' + $ref: "#/components/schemas/PostUsers2FALoginErrorResponse" + /v2/auth/token: + post: + tags: + - authentication-api + security: [] + summary: Create access token + operationId: AuthCreateAccessToken + description: | + Creates and returns a short-lived access token in JWT format for use as a bearer when calling Docker APIs. + + If successful, the access token returned should be used in the HTTP Authorization header like + `Authorization: Bearer {access_token}`. + + _**If your organization has SSO enforced, you must use a personal access token (PAT) instead of a password.**_ + requestBody: + content: + application/json: + schema: + description: Request to create access token + type: object + required: + - identifier + - secret + properties: + identifier: + description: | + The identifier of the account to create an access token for. If using a password or personal access token, + this must be a username. If using an organization access token, this must be an organization name. + type: string + example: myusername + secret: + description: | + The secret of the account to create an access token for. This can be a password, personal access token, or + organization access token. + type: string + example: dckr_pat_124509ugsdjga93 + responses: + "200": + description: Token created + content: + application/json: + schema: + $ref: "#/components/schemas/AuthCreateTokenResponse" + "401": + description: Authentication failed + $ref: "#/components/responses/unauthorized" /v2/access-tokens: post: - summary: Create a personal access token + summary: Create personal access token description: Creates and returns a personal access token. 
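As a rough, hypothetical sketch of this route: the request body field names below (`token_label`, `scopes`) are assumptions, since the `createAccessTokenRequest` schema isn't expanded in this hunk.

```python
# Hypothetical sketch: create a personal access token. The body field
# names are assumed, not confirmed by the schema shown in this diff.
import requests

TOKEN = "<bearer-token>"  # obtained from POST /v2/auth/token

resp = requests.post(
    "https://hub.docker.com/v2/access-tokens",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={"token_label": "ci-runner", "scopes": ["repo:read"]},  # assumed fields
)
print(resp.status_code)  # 201 Created on success
```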
tags: - access-tokens + security: + - bearerAuth: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/createAccessTokenRequest' + $ref: "#/components/schemas/createAccessTokenRequest" required: true responses: - '201': + "201": description: Created content: application/json: schema: - $ref: '#/components/schemas/createAccessTokensResponse' - '400': - $ref: '#/components/responses/BadRequest' - '401': - $ref: '#/components/responses/Unauthorized' + $ref: "#/components/schemas/createAccessTokensResponse" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" get: - summary: Get a list of personal access tokens + summary: List personal access tokens description: Returns a paginated list of personal access tokens. tags: - access-tokens + security: + - bearerAuth: [] parameters: - in: query name: page @@ -213,16 +323,16 @@ paths: type: number default: 10 responses: - '200': + "200": description: OK content: application/json: schema: - $ref: '#/components/schemas/getAccessTokensResponse' - '400': - $ref: '#/components/responses/BadRequest' - '401': - $ref: '#/components/responses/Unauthorized' + $ref: "#/components/schemas/getAccessTokensResponse" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" /v2/access-tokens/{uuid}: parameters: - in: path @@ -231,75 +341,179 @@ paths: schema: type: string patch: - summary: Update a personal access token + summary: Update personal access token description: | Updates a personal access token partially. You can either update the token's label or enable/disable it. tags: - access-tokens + security: + - bearerAuth: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/patchAccessTokenRequest' + $ref: "#/components/schemas/patchAccessTokenRequest" required: true responses: - '200': + "200": description: OK content: application/json: schema: - $ref: '#/components/schemas/patchAccessTokenResponse' - '400': - $ref: '#/components/responses/BadRequest' - '401': - $ref: '#/components/responses/Unauthorized' + $ref: "#/components/schemas/patchAccessTokenResponse" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" get: - summary: Get a personal access token + summary: Get personal access token description: Returns a personal access token by UUID. tags: - access-tokens + security: + - bearerAuth: [] responses: - '200': + "200": description: OK content: application/json: schema: allOf: - - $ref: '#/components/schemas/accessToken' + - $ref: "#/components/schemas/accessToken" - type: object properties: token: type: string - example: '' - '401': - $ref: '#/components/responses/Unauthorized' - '404': - $ref: '#/components/responses/NotFound' + example: "" + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" delete: - summary: Delete a personal access token + summary: Delete personal access token description: | Deletes a personal access token permanently. This cannot be undone. tags: - access-tokens + security: + - bearerAuth: [] responses: - '204': + "204": description: A successful response. 
- '401': - $ref: '#/components/responses/Unauthorized' - '404': - $ref: '#/components/responses/NotFound' + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" + /v2/auditlogs/{account}/actions: + get: + summary: List audit log actions + description: | + List audit log actions for a namespace to be used as a filter for querying audit log events. + + + operationId: AuditLogs_ListAuditActions + security: + - bearerAuth: [] + responses: + "200": + description: A successful response. + content: + application/json: + schema: + $ref: "#/components/schemas/GetAuditActionsResponse" + examples: + response: + value: + actions: + org: + actions: + - name: team.create + description: contains team create events + label: Team Created + - name: team.delete + description: contains team delete events + label: Team Deleted + - name: team.member.add + description: contains team member add events + label: Team Member Added + - name: team.member.remove + description: contains team member remove events + label: Team Member Removed + - name: team.member.invite + description: contains team member invite events + label: Team Member Invited + - name: member.removed + description: contains org member remove events + label: Organization Member Removed + - name: create + description: contains organization create events + label: Organization Created + label: Organization + repo: + actions: + - name: create + description: contains repository create events + label: Repository Created + - name: delete + description: contains repository delete events + label: Repository Deleted + - name: change_privacy + description: contains repository privacy change events + label: Privacy Changed + - name: tag.push + description: contains image tag push events + label: Tag Pushed + - name: tag.delete + description: contains image tag delete events + label: Tag Deleted + label: Repository + "429": + description: "" + content: + application/json: + schema: {} + examples: + response: + value: + detail: Rate limit exceeded + error: false + "500": + description: "" + content: + application/json: + schema: {} + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: "#/components/schemas/rpcStatus" + parameters: + - name: account + description: Namespace to query audit log actions for. + in: path + required: true + schema: + type: string + tags: + - audit-logs /v2/auditlogs/{account}: get: - summary: Returns list of audit log events - description: Get audit log events for a given namespace. - operationId: AuditLogs_GetAuditLogs + summary: List audit log events + description: | + List audit log events for a given namespace. + + + operationId: AuditLogs_ListAuditLogs + security: + - bearerAuth: [] responses: - '200': + "200": description: A successful response. 
content: application/json: schema: - $ref: '#/components/schemas/GetAuditLogsResponse' + $ref: "#/components/schemas/GetAuditLogsResponse" examples: response: value: @@ -311,11 +525,11 @@ paths: data: digest: sha256:c1ae9c435032a276f80220c7d9b40f76266bbe79243d34f9cda30b76fe114dfa tag: latest - timestamp: '2021-02-19T01:34:35Z' + timestamp: "2021-02-19T01:34:35Z" action_description: | pushed the tag latest with the digest sha256:c1ae9c435032a to the repository docker/example - '429': - description: '' + "429": + description: "" content: application/json: schema: {} @@ -324,8 +538,8 @@ paths: value: detail: Rate limit exceeded error: false - '500': - description: '' + "500": + description: "" content: application/json: schema: {} @@ -334,7 +548,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/rpcStatus' + $ref: "#/components/schemas/rpcStatus" parameters: - name: account description: Namespace to query audit logs for. @@ -395,95 +609,6 @@ paths: default: 25 tags: - audit-logs - /v2/auditlogs/{account}/actions: - get: - summary: Returns list of audit log actions - description: | - Get audit log actions for a namespace to be used as a filter for querying audit events. - operationId: AuditLogs_GetAuditActions - responses: - '200': - description: A successful response. - content: - application/json: - schema: - $ref: '#/components/schemas/GetAuditActionsResponse' - examples: - response: - value: - actions: - org: - actions: - - name: team.create - description: contains team create events - label: Team Created - - name: team.delete - description: contains team delete events - label: Team Deleted - - name: team.member.add - description: contains team member add events - label: Team Member Added - - name: team.member.remove - description: contains team member remove events - label: Team Member Removed - - name: team.member.invite - description: contains team member invite events - label: Team Member Invited - - name: member.removed - description: contains org member remove events - label: Organization Member Removed - - name: create - description: contains organization create events - label: Organization Created - label: Organization - repo: - actions: - - name: create - description: contains repository create events - label: Repository Created - - name: delete - description: contains repository delete events - label: Repository Deleted - - name: change_privacy - description: contains repository privacy change events - label: Privacy Changed - - name: tag.push - description: contains image tag push events - label: Tag Pushed - - name: tag.delete - description: contains image tag delete events - label: Tag Deleted - label: Repository - '429': - description: '' - content: - application/json: - schema: {} - examples: - response: - value: - detail: Rate limit exceeded - error: false - '500': - description: '' - content: - application/json: - schema: {} - default: - description: An unexpected error response. - content: - application/json: - schema: - $ref: '#/components/schemas/rpcStatus' - parameters: - - name: account - description: Namespace to query audit log actions for. - in: path - required: true - schema: - type: string - tags: - - audit-logs /v2/orgs/{name}/settings: parameters: - in: path @@ -498,30 +623,34 @@ paths: Returns organization settings by name. 
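To tie the audit-log routes above together, here is a minimal sketch that lists events for a namespace. The `logs` field and its keys come from the example response shown above; query parameters are omitted since only the `{account}` path parameter is expanded in this hunk.

```python
# Sketch: list audit log events for a namespace (bearer auth required).
import requests

TOKEN = "<bearer-token>"

resp = requests.get(
    "https://hub.docker.com/v2/auditlogs/myorg",  # {account} is the namespace
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
for event in resp.json().get("logs", []):
    print(event["timestamp"], event["actor"], event["action"])
```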
tags: - org-settings + security: + - bearerAuth: [] responses: - '200': + "200": description: OK content: application/json: schema: - $ref: '#/components/schemas/orgSettings' - '401': - $ref: '#/components/responses/Unauthorized' - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' + $ref: "#/components/schemas/orgSettings" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" put: summary: Update organization settings description: | - Updates an organization's settings. Some settings are only used when the organization is on a business plan. + Updates an organization's settings. Some settings are only used when the organization is on a business subscription. ***Only users with administrative privileges for the organization (owner role) can modify these settings.*** - The following settings are only used on a business plan: + The following settings are only used on a business subscription: - `restricted_images` tags: - org-settings + security: + - bearerAuth: [] requestBody: content: application/json: @@ -531,7 +660,7 @@ paths: properties: restricted_images: allOf: - - $ref: '#/components/schemas/restricted_images' + - $ref: "#/components/schemas/restricted_images" - type: object required: - enabled @@ -539,129 +668,471 @@ paths: - allow_verified_publishers required: true responses: - '200': + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/orgSettings" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + /v2/orgs/{name}/access-tokens: + post: + summary: Create access token + description: | + Create an access token for an organization. + tags: + - org-access-tokens + security: + - bearerAuth: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/createOrgAccessTokenRequest" + required: true + responses: + "201": + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/createOrgAccessTokenResponse" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + get: + summary: List access tokens + description: | + List access tokens for an organization. + tags: + - org-access-tokens + security: + - bearerAuth: [] + parameters: + - in: query + name: page + schema: + type: number + default: 1 + - in: query + name: page_size + schema: + type: number + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/getOrgAccessTokensResponse" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + + /v2/orgs/{org_name}/access-tokens/{access_token_id}: + parameters: + - $ref: "#/components/parameters/org_name" + - in: path + name: access_token_id + required: true + schema: + type: string + description: The ID of the access token to retrieve + example: "a7a5ef25-8889-43a0-8cc7-f2a94268e861" + get: + summary: Get access token + description: | + Get details of a specific access token for an organization. 
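A short sketch of the organization access token routes above: list a page of tokens, then read one by ID. The token ID is the example value from the path parameter; `myorg` is illustrative.

```python
# Sketch: list an organization's access tokens, then read one by ID.
import requests

TOKEN = "<bearer-token>"
BASE = "https://hub.docker.com/v2/orgs/myorg/access-tokens"
HEADERS = {"Authorization": f"Bearer {TOKEN}"}

page = requests.get(BASE, headers=HEADERS, params={"page": 1, "page_size": 10})
page.raise_for_status()

# The ID below is the example value from the path parameter above.
detail = requests.get(
    f"{BASE}/a7a5ef25-8889-43a0-8cc7-f2a94268e861", headers=HEADERS
)
```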
+ tags: + - org-access-tokens + security: + - bearerAuth: [] + responses: + "200": description: OK content: application/json: schema: - $ref: '#/components/schemas/orgSettings' - '401': - $ref: '#/components/responses/Unauthorized' - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' + $ref: "#/components/schemas/getOrgAccessTokenResponse" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + patch: + summary: Update access token + description: | + Update a specific access token for an organization. + tags: + - org-access-tokens + security: + - bearerAuth: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/updateOrgAccessTokenRequest" + required: true + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/updateOrgAccessTokenResponse" + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + delete: + summary: Delete access token + description: | + Delete a specific access token for an organization. This action cannot be undone. + tags: + - org-access-tokens + security: + - bearerAuth: [] + responses: + "204": + description: Access token deleted successfully + "401": + $ref: "#/components/responses/Unauthorized" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" /v2/namespaces/{namespace}/repositories/{repository}/tags: parameters: - - $ref: '#/components/parameters/namespace' - - $ref: '#/components/parameters/repository' + - $ref: "#/components/parameters/namespace" + - $ref: "#/components/parameters/repository" get: + operationId: ListRepositoryTags summary: List repository tags tags: - repositories + security: + - bearerAuth: [] + parameters: + - in: query + name: page + required: false + schema: + type: integer + description: Page number to get. Defaults to 1. + - in: query + name: page_size + required: false + schema: + type: integer + description: Number of items to get per page. Defaults to 10. Max of 100. 
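A minimal sketch of the tag routes defined here and just below: list tags with the pagination parameters above, and use the `HEAD` variant to check for a single tag. The `library/nginx` names are illustrative only.

```python
# Sketch: list repository tags, then check one tag with a HEAD request.
import requests

TOKEN = "<bearer-token>"
BASE = "https://hub.docker.com/v2/namespaces/library/repositories/nginx/tags"
HEADERS = {"Authorization": f"Bearer {TOKEN}"}

tags = requests.get(BASE, headers=HEADERS, params={"page": 1, "page_size": 25})
tags.raise_for_status()

# HEAD /tags/{tag} answers 200 if the tag exists, 404 if not.
exists = requests.head(f"{BASE}/latest", headers=HEADERS).status_code == 200
```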
+ responses: + "200": + $ref: "#/components/responses/list_tags" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + head: + summary: Check repository tags + tags: + - repositories + security: + - bearerAuth: [] + responses: + "200": + description: Repository contains tags + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + /v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}: + parameters: + - $ref: "#/components/parameters/namespace" + - $ref: "#/components/parameters/repository" + - $ref: "#/components/parameters/tag" + get: + operationId: GetRepositoryTag + summary: Read repository tag + tags: + - repositories + security: + - bearerAuth: [] + responses: + "200": + $ref: "#/components/responses/get_tag" + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + head: + summary: Check repository tag + tags: + - repositories + security: + - bearerAuth: [] + responses: + "200": + description: Repository tag exists + "403": + $ref: "#/components/responses/Forbidden" + "404": + $ref: "#/components/responses/NotFound" + /v2/repositories/{namespace}/{repository}/groups: + parameters: + - $ref: "#/components/parameters/namespace" + - $ref: "#/components/parameters/repository" + post: + summary: Assign a group (Team) to a repository for access + tags: + - repositories + security: + - bearerAuth: [] + parameters: + - in: query + name: group_name + required: true + schema: + type: string + description: Name of the group (team) in the organization. + - in: query + name: permission + required: true + schema: + type: string + description: | + Access level for the group. Possible values: + - `read` + - `write` + - `admin` + responses: + "200": + $ref: "#/components/responses/team_repo" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/NotFound" + /v2/namespaces/{namespace}/repositories: + parameters: + - $ref: "#/components/parameters/namespace" + get: + operationId: listNamespaceRepositories + summary: List repositories in a namespace + description: | + Returns a list of repositories within the specified namespace (organization or user). + + Public repositories are accessible to everyone, while private repositories require appropriate authentication and permissions. + tags: + - repositories + security: + - bearerAuth: [ ] + - { } # Allow anonymous access for public repositories parameters: - in: query name: page required: false schema: type: integer + minimum: 1 + default: 1 description: Page number to get. Defaults to 1. - in: query name: page_size required: false schema: type: integer - description: Number of items to get per page. Defaults to 10. Max of 100. 
- responses: - '200': - $ref: '#/components/responses/list_tags' - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' - head: - summary: Check repository tags - tags: - - repositories - responses: - '200': - description: Repository contains tags - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' - /v2/namespaces/{namespace}/repositories/{repository}/tags/{tag}: - parameters: - - $ref: '#/components/parameters/namespace' - - $ref: '#/components/parameters/repository' - - $ref: '#/components/parameters/tag' - get: - summary: Read repository tag - tags: - - repositories - responses: - '200': - $ref: '#/components/responses/get_tag' - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' - head: - summary: Check repository tag - tags: - - repositories + minimum: 1 + maximum: 1000 + default: 10 + description: Number of repositories to get per page. Defaults to 10. Max of 1000. + - in: query + name: name + required: false + schema: + type: string + description: Filter repositories by name (partial match). + - in: query + name: ordering + required: false + schema: + type: string + enum: + - name + - -name + - last_updated + - -last_updated + - pull_count + - -pull_count + description: | + Order repositories by the specified field. Prefix with '-' for descending order. + Available options: + - `name` / `-name`: Repository name (ascending/descending) + - `last_updated` / `-last_updated`: Last update time (ascending/descending) + - `pull_count` / `-pull_count`: Number of pulls (ascending/descending) responses: - '200': - description: Repository tag exists - '403': - $ref: '#/components/responses/Forbidden' - '404': - $ref: '#/components/responses/NotFound' + "200": + description: List of repositories + content: + application/json: + schema: + $ref: "#/components/schemas/list_repositories_response" + examples: + repositories_list: + value: + count: 287 + next: "https://hub.docker.com/v2/namespaces/docker/repositories?page=2&page_size=2" + previous: null + results: + - name: "highland_builder" + namespace: "docker" + repository_type: "image" + status: 1 + status_description: "active" + description: "Image for performing Docker build requests" + is_private: false + star_count: 7 + pull_count: 15722123 + last_updated: "2023-06-20T10:44:45.459826Z" + last_modified: "2024-10-16T13:48:34.145251Z" + date_registered: "2015-05-19T21:13:35.937763Z" + affiliation: "" + media_types: + - "application/octet-stream" + - "application/vnd.docker.container.image.v1+json" + - "application/vnd.docker.distribution.manifest.v1+prettyjws" + content_types: + - "unrecognized" + - "image" + categories: + - name: "Languages & frameworks" + slug: "languages-and-frameworks" + - name: "Integration & delivery" + slug: "integration-and-delivery" + - name: "Operating systems" + slug: "operating-systems" + storage_size: 488723114800 + - name: "whalesay" + namespace: "docker" + repository_type: null + status: 1 + status_description: "active" + description: "An image for use in the Docker demo tutorial" + is_private: false + star_count: 757 + pull_count: 130737682 + last_updated: "2015-06-19T19:06:27.388123Z" + last_modified: "2024-10-16T13:48:34.145251Z" + date_registered: "2015-06-09T18:16:36.527329Z" + affiliation: "" + media_types: + - "application/vnd.docker.distribution.manifest.v1+prettyjws" + content_types: + - "image" + categories: + - name: "Languages & frameworks" + slug: 
"languages-and-frameworks" + - name: "Integration & delivery" + slug: "integration-and-delivery" + storage_size: 103666708 + "400": + description: Bad Request - Invalid request parameters + content: + application/json: + schema: + $ref: "#/components/schemas/error" + examples: + invalid_ordering: + summary: Invalid ordering value + value: + fields: + ordering: [ "Invalid ordering value. Must be one of: name, -name, last_updated, -last_updated, pull_count, -pull_count" ] + text: "Invalid ordering value" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + description: Page not found - occurs when requesting a page number `>1` that exceeds the available results + content: + application/json: + schema: + $ref: "#/components/schemas/error" + /v2/orgs/{org_name}/members: - x-audience: public parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/search' - - $ref: '#/components/parameters/page' - - $ref: '#/components/parameters/page_size' - - $ref: '#/components/parameters/invites' - - $ref: '#/components/parameters/type' - - $ref: '#/components/parameters/role' + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/search" + - $ref: "#/components/parameters/page" + - $ref: "#/components/parameters/page_size" + - $ref: "#/components/parameters/invites" + - $ref: "#/components/parameters/type" + - $ref: "#/components/parameters/role" get: summary: List org members description: | - Returns a list of members for an organization" + Returns a list of members for an organization. + + _The following fields are only visible to orgs with insights enabled._ + + - `last_logged_in_at` + - `last_seen_at` + - `last_desktop_version` + + To make visible, please see [View Insights for organization users](https://docs.docker.com/admin/organization/insights/#view-insights-for-organization-users). 
+ + tags: - orgs + security: + - bearerAuth: [] responses: - '200': + "200": description: List of members content: application/json: schema: type: array items: - $ref: '#/components/schemas/org_member_paginated' - '400': - $ref: '#/components/responses/bad_request' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_member_paginated" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/orgs/{org_name}/members/export: - x-audience: public parameters: - - $ref: '#/components/parameters/org_name' + - $ref: "#/components/parameters/org_name" get: summary: Export org members CSV description: | Export members of an organization as a CSV + + tags: - orgs + security: + - bearerAuth: [] responses: - '200': + "200": description: Exported members content: text/csv: @@ -711,26 +1182,30 @@ paths: schema: type: string example: attachment;filename="{org_name}-members-{timestamp}.csv" - '400': - $ref: '#/components/responses/bad_request' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/orgs/{org_name}/members/{username}: x-audience: public parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/username' + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/username" put: summary: Update org member (role) description: | Updates the role of a member in the organization. ***Only users in the "owners" group of the organization can use this endpoint.*** + + tags: - orgs + security: + - bearerAuth: [] requestBody: required: true content: @@ -747,50 +1222,58 @@ paths: - editor - member responses: - '200': + "200": description: Member role updated content: application/json: schema: - $ref: '#/components/schemas/org_member' - '400': - $ref: '#/components/responses/bad_request' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_member" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" delete: summary: Remove member from org description: | Removes the member from the org, ie. 
all groups in the org, unless they're the last owner + + tags: - orgs + security: + - bearerAuth: [] responses: - '204': + "204": description: Member removed successfully - '400': - $ref: '#/components/responses/bad_request' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/orgs/{org_name}/invites: x-audience: public parameters: - - $ref: '#/components/parameters/org_name' + - $ref: "#/components/parameters/org_name" get: summary: List org invites description: | Return all pending invites for a given org, only team owners can call this endpoint + + tags: - invites + security: + - bearerAuth: [] responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: @@ -799,24 +1282,28 @@ paths: data: type: array items: - $ref: '#/components/schemas/invite' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/invite" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/orgs/{org_name}/groups: x-audience: public parameters: - - $ref: '#/components/parameters/org_name' + - $ref: "#/components/parameters/org_name" get: summary: Get groups of an organization + description: | + tags: - groups + security: + - bearerAuth: [] parameters: - - $ref: '#/components/parameters/page' - - $ref: '#/components/parameters/page_size' + - $ref: "#/components/parameters/page" + - $ref: "#/components/parameters/page_size" - in: query name: username schema: @@ -828,8 +1315,8 @@ paths: type: string description: Get groups for the specified group in the organization. responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: @@ -846,18 +1333,23 @@ paths: results: type: array items: - $ref: '#/components/schemas/org_group' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_group" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" post: summary: Create a new group - description: Create a new group within an organization. + description: | + Create a new group within an organization. 
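A minimal sketch of this route, using the request schema that follows (`name` is required, `description` is optional); the org and group names are illustrative.

```python
# Sketch: create a group (team) within an organization.
import requests

TOKEN = "<bearer-token>"

resp = requests.post(
    "https://hub.docker.com/v2/orgs/myorg/groups",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={"name": "backend", "description": "Backend engineers"},
)
print(resp.status_code)  # 201 when the group is created
```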
+ + tags: - groups + security: + - bearerAuth: [] requestBody: content: application/json: @@ -870,44 +1362,52 @@ paths: description: type: string responses: - '201': + "201": description: Group created successfully content: application/json: schema: - $ref: '#/components/schemas/org_group' - '400': - $ref: '#/components/responses/bad_request' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' + $ref: "#/components/schemas/org_group" + "400": + $ref: "#/components/responses/bad_request" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" /v2/orgs/{org_name}/groups/{group_name}: x-audience: public parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/group_name' + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/group_name" get: summary: Get a group of an organization + description: | + tags: - groups + security: + - bearerAuth: [] responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: - $ref: '#/components/schemas/org_group' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_group" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" put: summary: Update the details for an organization group + description: | + tags: - groups + security: + - bearerAuth: [] requestBody: content: application/json: @@ -920,22 +1420,26 @@ paths: description: type: string responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: - $ref: '#/components/schemas/org_group' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_group" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" patch: summary: Update some details for an organization group + description: | + tags: - groups + security: + - bearerAuth: [] requestBody: content: application/json: @@ -946,39 +1450,45 @@ paths: description: type: string responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: - $ref: '#/components/schemas/org_group' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/org_group" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" delete: summary: Delete an organization group + description: | + tags: - groups + security: + - bearerAuth: [] responses: - '204': + "204": description: Group deleted successfully - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/orgs/{org_name}/groups/{group_name}/members: x-audience: public get: + security: + - 
bearerAuth: [] parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/group_name' - - $ref: '#/components/parameters/page' - - $ref: '#/components/parameters/page_size' + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/group_name" + - $ref: "#/components/parameters/page" + - $ref: "#/components/parameters/page_size" - in: query name: search schema: @@ -988,11 +1498,13 @@ paths: description: | List the members (users) that are in a group. If user is owner of the org or has otherwise elevated permissions, they can search by email and the result will also contain emails. + + tags: - groups responses: - '200': - description: '' + "200": + description: "" content: application/json: schema: @@ -1009,52 +1521,60 @@ paths: results: type: array items: - $ref: '#/components/schemas/group_member' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + $ref: "#/components/schemas/group_member" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" post: parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/group_name' - summary: Adds a member to a group + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/group_name" + summary: Add a member to a group + description: | + tags: - groups + security: + - bearerAuth: [] requestBody: - $ref: '#/components/requestBodies/add_member_to_org_group' + $ref: "#/components/requestBodies/add_member_to_org_group" responses: - '200': + "200": description: OK - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' - '500': - $ref: '#/components/responses/internal_error' + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" + "500": + $ref: "#/components/responses/internal_error" /v2/orgs/{org_name}/groups/{group_name}/members/{username}: x-audience: public parameters: - - $ref: '#/components/parameters/org_name' - - $ref: '#/components/parameters/group_name' - - $ref: '#/components/parameters/username' + - $ref: "#/components/parameters/org_name" + - $ref: "#/components/parameters/group_name" + - $ref: "#/components/parameters/username" delete: - summary: Removes a user from a group + summary: Remove a user from a group + description: | + tags: - groups + security: + - bearerAuth: [] responses: - '204': + "204": description: User removed successfully - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/invites/{id}: x-audience: public parameters: @@ -1067,17 +1587,21 @@ paths: summary: Cancel an invite description: | Mark the invite as cancelled so it doesn't show up on the list of pending invites + + tags: - invites + security: + - bearerAuth: [] responses: - '204': - description: '' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "204": + description: "" 
+ "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/invites/{id}/resend: x-audience: public parameters: @@ -1090,31 +1614,39 @@ paths: summary: Resend an invite description: | Resend a pending invite to the user, any org owner can resend an invite + + tags: - invites + security: + - bearerAuth: [] responses: - '204': - description: '' - '401': - $ref: '#/components/responses/unauthorized' - '403': - $ref: '#/components/responses/forbidden' - '404': - $ref: '#/components/responses/not_found' + "204": + description: "" + "401": + $ref: "#/components/responses/unauthorized" + "403": + $ref: "#/components/responses/forbidden" + "404": + $ref: "#/components/responses/not_found" /v2/invites/bulk: x-audience: public parameters: - - $ref: '#/components/parameters/bulk_invite' + - $ref: "#/components/parameters/bulk_invite" post: summary: Bulk create invites description: | Create multiple invites by emails or DockerIDs. Only a team owner can create invites. + + tags: - invites requestBody: - $ref: '#/components/requestBodies/bulk_invite_request' + $ref: "#/components/requestBodies/bulk_invite_request" + security: + - bearerAuth: [] responses: - '202': + "202": description: Accepted content: application/json: @@ -1122,11 +1654,11 @@ paths: type: object properties: invitees: - $ref: '#/components/schemas/bulk_invite' - '400': - $ref: '#/components/responses/bad_request' - '409': - $ref: '#/components/responses/conflict' + $ref: "#/components/schemas/bulk_invite" + "400": + $ref: "#/components/responses/bad_request" + "409": + $ref: "#/components/responses/conflict" /v2/scim/2.0/ServiceProviderConfig: x-audience: public get: @@ -1138,12 +1670,12 @@ paths: security: - bearerSCIMAuth: [] responses: - '200': - $ref: '#/components/responses/scim_get_service_provider_config_resp' - '401': - $ref: '#/components/responses/scim_unauthorized' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_service_provider_config_resp" + "401": + $ref: "#/components/responses/scim_unauthorized" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/ResourceTypes: x-audience: public get: @@ -1155,12 +1687,12 @@ paths: security: - bearerSCIMAuth: [] responses: - '200': - $ref: '#/components/responses/scim_get_resource_types_resp' - '401': - $ref: '#/components/responses/scim_unauthorized' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_resource_types_resp" + "401": + $ref: "#/components/responses/scim_unauthorized" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/ResourceTypes/{name}: x-audience: public get: @@ -1179,14 +1711,14 @@ paths: security: - bearerSCIMAuth: [] responses: - '200': - $ref: '#/components/responses/scim_get_resource_type_resp' - '401': - $ref: '#/components/responses/scim_unauthorized' - '404': - $ref: '#/components/responses/scim_not_found' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_resource_type_resp" + "401": + $ref: "#/components/responses/scim_unauthorized" + "404": + $ref: "#/components/responses/scim_not_found" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/Schemas: x-audience: public get: @@ -1198,12 +1730,12 @@ paths: security: - bearerSCIMAuth: [] responses: - '200': - $ref: '#/components/responses/scim_get_schemas_resp' - '401': - $ref: 
'#/components/responses/scim_unauthorized' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_schemas_resp" + "401": + $ref: "#/components/responses/scim_unauthorized" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/Schemas/{id}: x-audience: public get: @@ -1222,14 +1754,14 @@ paths: security: - bearerSCIMAuth: [] responses: - '200': - $ref: '#/components/responses/scim_get_schema_resp' - '401': - $ref: '#/components/responses/scim_unauthorized' - '404': - $ref: '#/components/responses/scim_not_found' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_schema_resp" + "401": + $ref: "#/components/responses/scim_unauthorized" + "404": + $ref: "#/components/responses/scim_not_found" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/Users: x-audience: public get: @@ -1267,7 +1799,7 @@ paths: schema: type: integer minimum: 1 - description: '' + description: "" example: 1 - name: count in: query @@ -1275,15 +1807,15 @@ paths: type: integer minimum: 1 maximum: 200 - description: '' + description: "" example: 10 - name: filter in: query schema: type: string - description: '' + description: "" example: userName eq "jon.snow@docker.com" - - $ref: '#/components/parameters/scim_attributes' + - $ref: "#/components/parameters/scim_attributes" - name: sortOrder in: query schema: @@ -1298,18 +1830,18 @@ paths: description: User attribute to sort by. example: userName responses: - '200': - $ref: '#/components/responses/scim_get_users_resp' - '400': - $ref: '#/components/responses/scim_bad_request' - '401': - $ref: '#/components/responses/scim_unauthorized' - '403': - $ref: '#/components/responses/scim_forbidden' - '404': - $ref: '#/components/responses/scim_not_found' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_users_resp" + "400": + $ref: "#/components/responses/scim_bad_request" + "401": + $ref: "#/components/responses/scim_unauthorized" + "403": + $ref: "#/components/responses/scim_forbidden" + "404": + $ref: "#/components/responses/scim_not_found" + "500": + $ref: "#/components/responses/scim_error" post: summary: Create user description: | @@ -1319,26 +1851,26 @@ paths: security: - bearerSCIMAuth: [] requestBody: - $ref: '#/components/requestBodies/scim_create_user_request' + $ref: "#/components/requestBodies/scim_create_user_request" responses: - '201': - $ref: '#/components/responses/scim_create_user_resp' - '400': - $ref: '#/components/responses/scim_bad_request' - '401': - $ref: '#/components/responses/scim_unauthorized' - '403': - $ref: '#/components/responses/scim_forbidden' - '404': - $ref: '#/components/responses/scim_not_found' - '409': - $ref: '#/components/responses/scim_conflict' - '500': - $ref: '#/components/responses/scim_error' + "201": + $ref: "#/components/responses/scim_create_user_resp" + "400": + $ref: "#/components/responses/scim_bad_request" + "401": + $ref: "#/components/responses/scim_unauthorized" + "403": + $ref: "#/components/responses/scim_forbidden" + "404": + $ref: "#/components/responses/scim_not_found" + "409": + $ref: "#/components/responses/scim_conflict" + "500": + $ref: "#/components/responses/scim_error" /v2/scim/2.0/Users/{id}: x-audience: public parameters: - - $ref: '#/components/parameters/scim_user_id' + - $ref: "#/components/parameters/scim_user_id" get: summary: Get a user description: | @@ -1348,18 +1880,18 @@ paths: security: - bearerSCIMAuth: [] responses: - 
'200': - $ref: '#/components/responses/scim_get_user_resp' - '400': - $ref: '#/components/responses/scim_bad_request' - '401': - $ref: '#/components/responses/scim_unauthorized' - '403': - $ref: '#/components/responses/scim_forbidden' - '404': - $ref: '#/components/responses/scim_not_found' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_get_user_resp" + "400": + $ref: "#/components/responses/scim_bad_request" + "401": + $ref: "#/components/responses/scim_unauthorized" + "403": + $ref: "#/components/responses/scim_forbidden" + "404": + $ref: "#/components/responses/scim_not_found" + "500": + $ref: "#/components/responses/scim_error" put: summary: Update a user description: | @@ -1369,22 +1901,22 @@ paths: security: - bearerSCIMAuth: [] requestBody: - $ref: '#/components/requestBodies/scim_update_user_request' + $ref: "#/components/requestBodies/scim_update_user_request" responses: - '200': - $ref: '#/components/responses/scim_update_user_resp' - '400': - $ref: '#/components/responses/scim_bad_request' - '401': - $ref: '#/components/responses/scim_unauthorized' - '403': - $ref: '#/components/responses/scim_forbidden' - '404': - $ref: '#/components/responses/scim_not_found' - '409': - $ref: '#/components/responses/scim_conflict' - '500': - $ref: '#/components/responses/scim_error' + "200": + $ref: "#/components/responses/scim_update_user_resp" + "400": + $ref: "#/components/responses/scim_bad_request" + "401": + $ref: "#/components/responses/scim_unauthorized" + "403": + $ref: "#/components/responses/scim_forbidden" + "404": + $ref: "#/components/responses/scim_not_found" + "409": + $ref: "#/components/responses/scim_conflict" + "500": + $ref: "#/components/responses/scim_error" components: responses: BadRequest: @@ -1392,83 +1924,83 @@ components: content: application/json: schema: - $ref: '#/components/schemas/ValueError' + $ref: "#/components/schemas/ValueError" Unauthorized: description: Unauthorized content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: "#/components/schemas/Error" Forbidden: description: Forbidden content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: "#/components/schemas/Error" NotFound: description: Not Found content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: "#/components/schemas/Error" list_tags: description: list repository tags content: application/json: schema: - $ref: '#/components/schemas/paginated_tags' + $ref: "#/components/schemas/paginated_tags" get_tag: description: repository tag content: application/json: schema: - $ref: '#/components/schemas/tag' + $ref: "#/components/schemas/tag" bad_request: description: Bad Request content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: "#/components/schemas/error" unauthorized: description: Unauthorized content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: "#/components/schemas/error" forbidden: description: Forbidden content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: "#/components/schemas/error" not_found: description: Not Found content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: "#/components/schemas/error" conflict: description: Conflict content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: "#/components/schemas/error" internal_error: description: Internal content: application/json: schema: - $ref: '#/components/schemas/error' + $ref: 
"#/components/schemas/error" scim_bad_request: description: Bad Request content: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '400' + example: "400" scimType: type: string description: Some types of errors will return this per the specification. @@ -1478,58 +2010,58 @@ components: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '401' + example: "401" scim_forbidden: description: Forbidden content: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '403' + example: "403" scim_not_found: description: Not Found content: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '404' + example: "404" scim_conflict: description: Conflict content: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '409' + example: "409" scim_error: description: Internal Error content: application/scim+json: schema: allOf: - - $ref: '#/components/schemas/scim_error' + - $ref: "#/components/schemas/scim_error" - properties: status: - example: '500' + example: "500" scim_get_service_provider_config_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_service_provider_config' + $ref: "#/components/schemas/scim_service_provider_config" scim_get_resource_types_resp: - description: '' + description: "" content: application/scim+json: schema: @@ -1546,15 +2078,15 @@ components: resources: type: array items: - $ref: '#/components/schemas/scim_resource_type' + $ref: "#/components/schemas/scim_resource_type" scim_get_resource_type_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_resource_type' + $ref: "#/components/schemas/scim_resource_type" scim_get_schemas_resp: - description: '' + description: "" content: application/scim+json: schema: @@ -1571,15 +2103,15 @@ components: resources: type: array items: - $ref: '#/components/schemas/scim_schema' + $ref: "#/components/schemas/scim_schema" scim_get_schema_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_schema' + $ref: "#/components/schemas/scim_schema" scim_get_users_resp: - description: '' + description: "" content: application/scim+json: schema: @@ -1603,26 +2135,149 @@ components: resources: type: array items: - $ref: '#/components/schemas/scim_user' + $ref: "#/components/schemas/scim_user" scim_create_user_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_user' + $ref: "#/components/schemas/scim_user" scim_get_user_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_user' + $ref: "#/components/schemas/scim_user" scim_update_user_resp: - description: '' + description: "" content: application/scim+json: schema: - $ref: '#/components/schemas/scim_user' + $ref: "#/components/schemas/scim_user" schemas: + repository_list_entry: + type: object + properties: + name: + type: string + description: Name of the repository 
+ example: "hello-world" + namespace: + type: string + description: Namespace (organization or username) that owns the repository + example: "docker" + repository_type: + type: string + description: Type of repository + enum: + - image + - plugin + - null + example: "image" + nullable: true + status: + type: integer + description: Repository status code + example: 1 + status_description: + type: string + description: Human-readable repository status + enum: + - active + - inactive + example: "active" + description: + type: string + description: Repository description + nullable: true + example: "Hello World! (an example of minimal Dockerization)" + is_private: + type: boolean + description: Whether the repository is private + example: false + star_count: + type: integer + description: Number of users who starred this repository + minimum: 0 + example: 1234 + pull_count: + type: integer + description: Total number of pulls for this repository + minimum: 0 + example: 50000000 + last_updated: + type: string + format: date-time + description: ISO 8601 timestamp of when the repository was last updated + example: "2023-12-01T10:30:00Z" + nullable: true + last_modified: + type: string + format: date-time + description: ISO 8601 timestamp of when the repository was last modified + example: "2023-12-01T10:30:00Z" + nullable: true + date_registered: + type: string + format: date-time + description: ISO 8601 timestamp of when the repository was created + example: "2013-06-19T19:07:54Z" + affiliation: + type: string + description: User's affiliation with the repository (empty string if no affiliation) + example: "" + media_types: + type: array + description: Media types supported by this repository + items: + type: string + example: + - "application/vnd.docker.plugin.v1+json" + content_types: + type: array + description: Content types supported by this repository + items: + type: string + example: + - "plugin" + categories: + type: array + description: Categories associated with this repository + items: + $ref: "#/components/schemas/category" + example: [] + storage_size: + type: integer + description: Storage size in bytes used by this repository + minimum: 0 + example: 232719127 + category: + type: object + required: + - name + - slug + properties: + name: + type: string + description: Human-readable name of the category + example: "Databases" + minLength: 1 + slug: + type: string + description: URL-friendly identifier for the category + example: "databases" + minLength: 1 + pattern: "^[a-z0-9]+(?:-[a-z0-9]+)*$" + description: Repository category for classification and discovery + list_repositories_response: + allOf: + - $ref: "#/components/schemas/page" + - type: object + properties: + results: + type: array + items: + $ref: "#/components/schemas/repository_list_entry" UsersLoginRequest: description: User login details type: object @@ -1639,6 +2294,14 @@ components: The password or personal access token (PAT) of the Docker Hub account to authenticate with. type: string example: p@ssw0rd + AuthCreateTokenResponse: + description: successful access token response + type: object + properties: + access_token: + description: The created access token. This expires in 10 minutes. 
+ type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c PostUsersLoginSuccessResponse: description: successful user login response type: object @@ -1711,7 +2374,7 @@ components: details: type: array items: - $ref: '#/components/schemas/protobufAny' + $ref: "#/components/schemas/protobufAny" AuditLogAction: type: object properties: @@ -1731,7 +2394,7 @@ components: actions: type: array items: - $ref: '#/components/schemas/AuditLogAction' + $ref: "#/components/schemas/AuditLogAction" description: List of audit log actions. label: type: string @@ -1742,7 +2405,7 @@ components: actions: type: object additionalProperties: - $ref: '#/components/schemas/AuditLogActions' + $ref: "#/components/schemas/AuditLogActions" description: Map of audit log actions. description: GetAuditActions response. GetAuditLogsResponse: @@ -1751,7 +2414,7 @@ components: logs: type: array items: - $ref: '#/components/schemas/AuditLog' + $ref: "#/components/schemas/AuditLog" description: List of audit log events. description: GetAuditLogs response. AuditLog: @@ -1809,7 +2472,7 @@ components: example: some user agent created_at: type: string - example: '2021-07-20T12:00:00.000000Z' + example: "2021-07-20T12:00:00.000000Z" last_used: type: string example: null @@ -1852,8 +2515,15 @@ components: - repo:read items: type: string + expires_at: + type: string + description: | + Optional expiration date for the token. + If omitted, the token will remain valid indefinitely. + format: date-time + example: "2021-10-28T18:30:19.520861Z" createAccessTokensResponse: - $ref: '#/components/schemas/accessToken' + $ref: "#/components/schemas/accessToken" getAccessTokensResponse: type: object properties: @@ -1873,12 +2543,12 @@ components: type: array items: allOf: - - $ref: '#/components/schemas/accessToken' + - $ref: "#/components/schemas/accessToken" - type: object properties: token: type: string - example: '' + example: "" patchAccessTokenRequest: type: object properties: @@ -1891,12 +2561,12 @@ components: type: boolean example: false patchAccessTokenResponse: - $ref: '#/components/schemas/accessToken' + $ref: "#/components/schemas/accessToken" orgSettings: type: object properties: restricted_images: - $ref: '#/components/schemas/restricted_images' + $ref: "#/components/schemas/restricted_images" restricted_images: type: object properties: @@ -1944,7 +2614,7 @@ components: layers: type: array items: - $ref: '#/components/schemas/layer' + $ref: "#/components/schemas/layer" os: type: string description: operating system @@ -1965,12 +2635,12 @@ components: description: Status of the image last_pulled: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" description: datetime of last pull nullable: true last_pushed: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" description: datetime of last push nullable: true tag: @@ -1981,13 +2651,13 @@ components: description: tag ID images: type: object - $ref: '#/components/schemas/image' + $ref: "#/components/schemas/image" creator: type: integer description: ID of the user that pushed the tag last_updated: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" description: datetime of last update nullable: true last_updater: @@ -2016,23 +2686,23 @@ components: description: whether a tag has been pushed to or pulled in the past month 
tag_last_pulled: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" description: datetime of last pull nullable: true tag_last_pushed: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" description: datetime of last push nullable: true paginated_tags: allOf: - - $ref: '#/components/schemas/page' + - $ref: "#/components/schemas/page" - type: object properties: results: type: array items: - $ref: '#/components/schemas/tag' + $ref: "#/components/schemas/tag" page: type: object properties: @@ -2083,13 +2753,13 @@ components: example: server scim_schema_parent_attribute: allOf: - - $ref: '#/components/schemas/scim_schema_attribute' + - $ref: "#/components/schemas/scim_schema_attribute" - type: object properties: subAttributes: type: array items: - $ref: '#/components/schemas/scim_schema_attribute' + $ref: "#/components/schemas/scim_schema_attribute" invite: type: object properties: @@ -2114,7 +2784,7 @@ components: example: owners created_at: type: string - example: '2021-10-28T18:30:19.520861Z' + example: "2021-10-28T18:30:19.520861Z" bulk_invite: type: object properties: @@ -2132,7 +2802,7 @@ components: description: status of the invite or validation error invite: description: Invite data if successfully invited - $ref: '#/components/schemas/invite' + $ref: "#/components/schemas/invite" example: invitees: - invitee: invitee@docker.com @@ -2143,7 +2813,7 @@ paths: invitee: invitee@docker.com org: docker team: owners - created_at: '2021-10-28T18:30:19.520861Z' + created_at: "2021-10-28T18:30:19.520861Z" - invitee: invitee2@docker.com status: existing_org_member - invitee: invitee3@docker.com @@ -2185,7 +2855,7 @@ components: example: Docker Inc date_joined: type: string - example: '2021-01-05T21:06:53.506400Z' + example: "2021-01-05T21:06:53.506400Z" full_name: type: string example: Jon Snow @@ -2208,7 +2878,7 @@ components: example: dockeruser org_member: allOf: - - $ref: '#/components/schemas/user' + - $ref: "#/components/schemas/user" properties: email: type: string @@ -2232,13 +2902,34 @@ components: - owners is_guest: type: boolean - description: If the organization has verfied domains, members that have email addresses outside of those domains will be flagged as Guest member + description: If the organization has verified domains, members that have email addresses outside of those domains will be flagged as guests. example: false primary_email: type: string - description: User's email primary address + description: The user's primary email address. example: example@docker.com deprecated: true + last_logged_in_at: + type: string + format: date-time + description: | + Last time the user logged in. To access this field, you must have insights visible for your organization. See + [Insights](https://docs.docker.com/admin/organization/insights/#view-insights-for-organization-users). + example: "2021-01-05T21:06:53.506400Z" + last_seen_at: + type: string + format: date-time + description: | + Last time the user was seen. To access this field, you must have insights visible for your organization. See + [Insights](https://docs.docker.com/admin/organization/insights/#view-insights-for-organization-users). + example: "2021-01-05T21:06:53.506400Z" + last_desktop_version: + type: string + description: | + Last desktop version the user used. To access this field, you must have insights visible for your organization.
See + [Insights](https://docs.docker.com/admin/organization/insights/#view-insights-for-organization-users). + example: 4.29.0 + org_member_paginated: type: object properties: @@ -2258,7 +2949,7 @@ components: type: array description: List of accounts. items: - $ref: '#/components/schemas/org_member' + $ref: "#/components/schemas/org_member" org_group: type: object properties: @@ -2293,7 +2984,8 @@ components: example: Docker Inc date_joined: type: string - example: '2021-01-05T21:06:53.506400Z' + format: date-time + example: "2021-01-05T21:06:53.506400Z" full_name: type: string example: John Snow @@ -2333,7 +3025,7 @@ components: type: boolean legacy_email_address: allOf: - - $ref: '#/components/schemas/email_address' + - $ref: "#/components/schemas/email_address" - type: object properties: user: @@ -2341,7 +3033,7 @@ components: example: dockeruser email_with_username: allOf: - - $ref: '#/components/schemas/email_address' + - $ref: "#/components/schemas/email_address" - type: object properties: username: @@ -2358,7 +3050,7 @@ components: - urn:ietf:params:scim:schemas:core:2.0:ServiceProviderConfig documentationUri: type: string - example: '' + example: "" patch: properties: supported: @@ -2462,7 +3154,7 @@ components: type: array example: [] items: - $ref: '#/components/schemas/scim_schema_parent_attribute' + $ref: "#/components/schemas/scim_schema_parent_attribute" scim_email: type: object properties: @@ -2515,26 +3207,26 @@ components: type: object properties: schemas: - $ref: '#/components/schemas/scim_user_schemas' + $ref: "#/components/schemas/scim_user_schemas" id: - $ref: '#/components/schemas/scim_user_id' + $ref: "#/components/schemas/scim_user_id" userName: - $ref: '#/components/schemas/scim_user_username' + $ref: "#/components/schemas/scim_user_username" name: - $ref: '#/components/schemas/scim_user_name' + $ref: "#/components/schemas/scim_user_name" displayName: - $ref: '#/components/schemas/scim_user_display_name' + $ref: "#/components/schemas/scim_user_display_name" active: type: boolean example: true emails: type: array items: - $ref: '#/components/schemas/scim_email' + $ref: "#/components/schemas/scim_email" groups: type: array items: - $ref: '#/components/schemas/scim_group' + $ref: "#/components/schemas/scim_group" meta: type: object properties: @@ -2546,12 +3238,177 @@ components: example: https://hub.docker.com/v2/scim/2.0/Users/d80f7c79-7730-49d8-9a41-7c42fb622d9c created: type: string + format: date-time description: The creation date for the user as a RFC3339 formatted string. - example: '2022-05-20T00:54:18Z' + example: "2022-05-20T00:54:18Z" lastModified: type: string + format: date-time description: The date the user was last modified as a RFC3339 formatted string. 
- example: '2022-05-20T00:54:18Z' + example: "2022-05-20T00:54:18Z" + orgAccessToken: + type: object + properties: + id: + type: string + example: "a7a5ef25-8889-43a0-8cc7-f2a94268e861" + label: + type: string + example: "My organization token" + created_by: + type: string + example: "johndoe" + is_active: + type: boolean + example: true + created_at: + type: string + format: date-time + example: "2022-05-20T00:54:18Z" + expires_at: + type: string + format: date-time + example: "2023-05-20T00:54:18Z" + nullable: true + last_used_at: + type: string + format: date-time + example: "2022-06-15T12:30:45Z" + nullable: true + orgAccessTokenResource: + type: object + required: + - type + - path + - scopes + properties: + type: + type: string + enum: + - TYPE_REPO + - TYPE_ORG + example: "TYPE_REPO" + description: The type of resource + path: + type: string + example: "myorg/myrepo" + description: | + The path of the resource. The format depends on the type of resource. + + To reference public repositories, use `*/*/public` as the path value. + scopes: + type: array + description: The scopes this token has access to + items: + type: string + example: "repo-pull" + getOrgAccessTokensResponse: + type: object + properties: + total: + type: number + example: 10 + next: + type: string + example: https://hub.docker.com/v2/orgs/docker/access-tokens?page=2&page_size=10 + previous: + type: string + example: https://hub.docker.com/v2/orgs/docker/access-tokens?page=1&page_size=10 + results: + type: array + items: + $ref: "#/components/schemas/orgAccessToken" + getOrgAccessTokenResponse: + allOf: + - $ref: "#/components/schemas/orgAccessToken" + - type: object + properties: + resources: + type: array + description: Resources this token has access to + items: + $ref: "#/components/schemas/orgAccessTokenResource" + createOrgAccessTokenRequest: + type: object + required: + - label + properties: + label: + type: string + description: Label for the access token + example: "My organization token" + description: + type: string + description: Description of the access token + example: "Token for CI/CD pipeline" + resources: + type: array + description: Resources this token has access to + items: + $ref: "#/components/schemas/orgAccessTokenResource" + expires_at: + type: string + format: date-time + description: Expiration date for the token + example: "2023-05-20T00:54:18Z" + nullable: true + createOrgAccessTokenResponse: + type: object + allOf: + - $ref: "#/components/schemas/orgAccessToken" + - type: object + properties: + token: + type: string + description: The actual token value that can be used for authentication + example: "dckr_oat_7awgM4jG5SQvxcvmNzhKj8PQjxo" + resources: + type: array + items: + $ref: "#/components/schemas/orgAccessTokenResource" + updateOrgAccessTokenRequest: + type: object + properties: + label: + type: string + description: Label for the access token + example: "My organization token" + description: + type: string + description: Description of the access token + example: "Token for CI/CD pipeline" + resources: + type: array + description: Resources this token has access to + items: + $ref: "#/components/schemas/orgAccessTokenResource" + is_active: + type: boolean + description: Whether the token is active + example: true + updateOrgAccessTokenResponse: + type: object + allOf: + - $ref: "#/components/schemas/orgAccessToken" + - type: object + properties: + resources: + type: array + description: Resources this token has access to + items: + $ref:
"#/components/schemas/orgAccessTokenResource" + team_repo: + allOf: + - $ref: "#/components/responses/team_repo" + properties: + group_name: + type: string + description: Name of the group + permission: + type: string + description: Repo access permission + enum: + - read + - write + - admin parameters: namespace: in: path @@ -2708,11 +3565,11 @@ components: - userName properties: schemas: - $ref: '#/components/schemas/scim_user_schemas' + $ref: "#/components/schemas/scim_user_schemas" userName: - $ref: '#/components/schemas/scim_user_username' + $ref: "#/components/schemas/scim_user_username" name: - $ref: '#/components/schemas/scim_user_name' + $ref: "#/components/schemas/scim_user_name" scim_update_user_request: required: true content: @@ -2723,10 +3580,10 @@ components: - schemas properties: schemas: - $ref: '#/components/schemas/scim_user_schemas' + $ref: "#/components/schemas/scim_user_schemas" name: allOf: - - $ref: '#/components/schemas/scim_user_name' + - $ref: "#/components/schemas/scim_user_name" - description: If this is omitted from the request, the update will skip the update on it. We will only ever change the name, but not clear it. enabled: type: boolean @@ -2744,20 +3601,31 @@ components: member: type: string example: jonsnow + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: JWT + bearerSCIMAuth: + type: http + scheme: bearer x-tagGroups: - name: General tags: + - changelog - resources - rate-limiting + - authentication - name: API tags: - - authentication + - authentication-api - access-tokens - images - audit-logs - org-settings - repositories - scim - - orgs + - orgs + - org-access-tokens - groups - invites diff --git a/content/reference/api/registry/_index.md b/content/reference/api/registry/_index.md new file mode 100644 index 000000000000..0d376d4a28ec --- /dev/null +++ b/content/reference/api/registry/_index.md @@ -0,0 +1,5 @@ +--- +title: Registry API +build: + render: never +--- \ No newline at end of file diff --git a/content/reference/api/registry/auth.md b/content/reference/api/registry/auth.md new file mode 100644 index 000000000000..d395066014af --- /dev/null +++ b/content/reference/api/registry/auth.md @@ -0,0 +1,221 @@ +--- +title: Registry authentication +description: "Specifies the Docker Registry v2 authentication" +keywords: registry, images, tags, repository, distribution, Bearer authentication, advanced +--- + +This document outlines the registry authentication scheme: + +![v2 registry auth](./images/v2-registry-auth.png) + +1. Attempt to begin a push/pull operation with the registry. +2. If the registry requires authorization it will return a `401 Unauthorized` + HTTP response with information on how to authenticate. +3. The registry client makes a request to the authorization service for a + Bearer token. +4. The authorization service returns an opaque Bearer token representing the + client's authorized access. +5. The client retries the original request with the Bearer token embedded in + the request's Authorization header. +6. The Registry authorizes the client by validating the Bearer token and the + claim set embedded within it and begins the push/pull session as usual. + +## Requirements + +- Registry clients which can understand and respond to token auth challenges + returned by the resource server. +- An authorization server capable of managing access controls to their + resources hosted by any given service (such as repositories in a Docker + Registry). 
+- A Docker Registry capable of trusting the authorization server to sign tokens
+ which clients can use for authorization, and of verifying these tokens for
+ single use or for use during a sufficiently short period of time.
+
+## Authorization server endpoint descriptions
+
+The described server is meant to serve as a standalone access control manager
+for resources hosted by other services that want to authenticate and manage
+authorizations using a separate access control manager.
+
+A service like this is used by the official Docker Registry to authenticate
+clients and verify their authorization to Docker image repositories.
+
+As of Docker 1.6, the registry client within the Docker Engine has been updated
+to handle such an authorization workflow.
+
+## How to authenticate
+
+Registry V1 clients first contact the index to initiate a push or pull. Under
+the Registry V2 workflow, clients should contact the registry first. If the
+registry server requires authentication, it will return a `401 Unauthorized`
+response with a `WWW-Authenticate` header detailing how to authenticate to this
+registry.
+
+For example, say I (username `jlhawn`) am attempting to push an image to the
+repository `samalba/my-app`. For the registry to authorize this, I will need
+`push` access to the `samalba/my-app` repository. The registry will first
+return this response:
+
+```text
+HTTP/1.1 401 Unauthorized
+Content-Type: application/json; charset=utf-8
+Docker-Distribution-Api-Version: registry/2.0
+Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
+Date: Thu, 10 Sep 2015 19:32:31 GMT
+Content-Length: 235
+Strict-Transport-Security: max-age=31536000
+
+{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]}
+```
+
+Note the HTTP response header indicating the auth challenge:
+
+```text
+Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push"
+```
+
+This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3).
+
+This challenge indicates that the registry requires a token issued by the
+specified token server and that the request the client is attempting will
+need to include sufficient access entries in its claim set. To respond to this
+challenge, the client will need to make a `GET` request to the URL
+`https://auth.docker.io/token` using the `service` and `scope` values from the
+`WWW-Authenticate` header.
+
+## Requesting a token
+
+Describes how to get a bearer token, and optionally a refresh token, from the
+token endpoint.
+
+### Query parameters
+
+#### `service`
+
+The name of the service which hosts the resource.
+
+#### `offline_token`
+
+Whether to return a refresh token along with the bearer token. A refresh token
+is capable of getting additional bearer tokens for the same subject with
+different scopes. The refresh token does not have an expiration and should be
+considered completely opaque to the client.
+
+#### `client_id`
+
+String identifying the client. This `client_id` does not need to be registered
+with the authorization server but should be set to a meaningful value in order
+to allow auditing keys created by unregistered clients.
Accepted syntax is
+defined in [RFC6749 Appendix
+A.1](https://tools.ietf.org/html/rfc6749#appendix-A.1).
+
+#### `scope`
+
+The resource in question, formatted as one of the space-delimited entries from
+the `scope` parameters from the `WWW-Authenticate` header shown previously. This
+query parameter should be specified multiple times if there is more than one
+`scope` entry from the `WWW-Authenticate` header. The previous example would be
+specified as: `scope=repository:samalba/my-app:pull,push`. The scope field may be
+empty to request a refresh token without providing any resource permissions to
+the returned bearer token.
+
+### Token response fields
+
+#### `token`
+
+An opaque `Bearer` token that clients should supply to subsequent
+requests in the `Authorization` header.
+
+#### `access_token`
+
+For compatibility with OAuth 2.0, the `token` under the name `access_token` is
+also accepted. At least one of these fields must be specified, but both may
+also appear (for compatibility with older clients). When both are specified,
+they should be equivalent. If they differ, the client's choice is undefined.
+
+#### `expires_in`
+
+(Optional) The duration in seconds since the token was issued that it will
+remain valid. When omitted, this defaults to 60 seconds. For compatibility
+with older clients, a token should never be returned with less than 60 seconds
+to live.
+
+#### `issued_at`
+
+(Optional) The [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)-serialized UTC
+standard time at which a given token was issued. If `issued_at` is omitted, the
+expiration window is measured from when the token exchange completed.
+
+#### `refresh_token`
+
+(Optional) Token which can be used to get additional access tokens for
+the same subject with different scopes. This token should be kept secure
+by the client and only sent to the authorization server which issues
+bearer tokens. This field will only be set when `offline_token=true` is
+provided in the request.
+
+### Example
+
+For this example, the client makes an HTTP GET request to the following URL:
+
+```text
+https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push
+```
+
+The token server should first attempt to authenticate the client using any
+authentication credentials provided with the request. Since Docker 1.11, the
+Docker Engine supports both Basic Authentication and OAuth2 for
+getting tokens. In Docker 1.10 and earlier, the registry client in the Docker Engine
+supports only Basic Authentication. If an attempt to authenticate to the token
+server fails, the token server should return a `401 Unauthorized` response
+indicating that the provided credentials are invalid.
+
+Whether the token server requires authentication is up to the policy of that
+access control provider. Some requests might require authentication to determine
+access (such as pushing or pulling a private repository) while others might not
+(such as pulling from a public repository).
+
+After authenticating the client (which may simply be an anonymous client if
+no attempt was made to authenticate), the token server must next query its
+access control list to determine whether the client has the requested scope. In
+this example request, if I have authenticated as user `jlhawn`, the token
+server will determine what access I have to the repository `samalba/my-app`
+hosted by the entity `registry.docker.io`.
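+
+This token request can be reproduced end to end with a short script. The
+following is a minimal sketch, not part of the specification: it assumes
+`curl` and `jq` are installed, and it requests an anonymous pull token for
+the public `library/ubuntu` repository rather than the authenticated
+`samalba/my-app` example used in this walkthrough.
+
+```bash
+# Request a pull token from the public Docker Hub token server.
+RESPONSE=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/ubuntu:pull")
+TOKEN=$(echo "$RESPONSE" | jq -r .token)
+
+# expires_in defaults to 60 seconds when the field is omitted.
+echo "expires_in: $(echo "$RESPONSE" | jq -r '.expires_in // 60')"
+
+# Retry the original request with the Bearer token.
+curl -s -o /dev/null -w "%{http_code}\n" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \
+  https://registry-1.docker.io/v2/library/ubuntu/manifests/latest
+```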
+ +Once the token server has determined what access the client has to the +resources requested in the `scope` parameter, it will take the intersection of +the set of requested actions on each resource and the set of actions that the +client has in fact been granted. If the client only has a subset of the +requested access **it must not be considered an error** as it is not the +responsibility of the token server to indicate authorization errors as part of +this workflow. + +Continuing with the example request, the token server will find that the +client's set of granted access to the repository is `[pull, push]` which when +intersected with the requested access `[pull, push]` yields an equal set. If +the granted access set was found only to be `[pull]` then the intersected set +would only be `[pull]`. If the client has no access to the repository then the +intersected set would be empty, `[]`. + +It is this intersected set of access which is placed in the returned token. + +The server then constructs an implementation-specific token with this +intersected set of access, and returns it to the Docker client to use to +authenticate to the audience service (within the indicated window of time): + +```text +HTTP/1.1 200 OK +Content-Type: application/json + +{"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": 3600,"issued_at": "2009-11-10T23:00:00Z"} +``` + +## Using the Bearer token + +Once the client has a token, it will try the registry request again with the +token placed in the HTTP `Authorization` header like so: + +```text +Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw +``` + +This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) \ No newline at end of file diff --git a/content/reference/api/registry/images/v2-registry-auth.png b/content/reference/api/registry/images/v2-registry-auth.png new file mode 100644 index 000000000000..0ea8a4205bc3 Binary files /dev/null and b/content/reference/api/registry/images/v2-registry-auth.png differ diff --git a/content/reference/api/registry/latest.md b/content/reference/api/registry/latest.md new file mode 100644 index 000000000000..7df6e5971659 --- /dev/null +++ b/content/reference/api/registry/latest.md @@ -0,0 +1,7 @@ +--- +layout: api +title: Supported registry API for Docker Hub +linktitle: Latest +description: "Supported registry API endpoints." 
+keywords: registry, on-prem, images, tags, repository, distribution, api, advanced +--- diff --git a/content/reference/api/registry/latest.yaml b/content/reference/api/registry/latest.yaml new file mode 100644 index 000000000000..c38c9ffd718a --- /dev/null +++ b/content/reference/api/registry/latest.yaml @@ -0,0 +1,1345 @@ +openapi: 3.0.3 +info: + title: Supported registry API for Docker Hub + description: | + Docker Hub is an OCI-compliant registry, which means it adheres to the open + standards defined by the Open Container Initiative (OCI) for distributing + container images. This ensures compatibility with a wide range of tools and + platforms in the container ecosystem. + + This reference documents the Docker Hub-supported subset of the Registry HTTP API V2. + It focuses on pulling, pushing, and deleting images. It does not cover the full OCI Distribution Specification. + + For the complete OCI specification, see [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). +servers: + - description: Docker Hub registry API + x-audience: public + url: https://registry-1.docker.io + +tags: + - name: overview + x-displayName: Overview + description: | + All endpoints in this API are prefixed by the version and repository name, for example: + + ``` + /v2/<name>/ + ``` + + This format provides structured access control and URI-based scoping of image operations. + + For example, to interact with the `library/ubuntu` repository, use: + + ``` + /v2/library/ubuntu/ + ``` + + Repository names must meet these requirements: + 1. Consist of path components matching `[a-z0-9]+(?:[._-][a-z0-9]+)*` + 2. If more than one component, they must be separated by `/` + 3. Full repository name must be fewer than 256 characters + + + - name: authentication + x-displayName: Authentication + description: | + Specifies registry authentication. + externalDocs: + description: Detailed authentication workflow and token usage + url: https://docs.docker.com/reference/api/registry/auth/ + + - name: Manifests + x-displayName: Manifests + description: | + Image manifests are JSON documents that describe an image: its configuration blob, the digests of each layer blob, and metadata such as media types and annotations. + + - name: Blobs + x-displayName: Blobs + description: | + Blobs are the binary objects referenced from manifests: + the config JSON and one or more compressed layer tarballs. + + - name: pull + x-displayName: Pulling Images + description: | + Pulling an image involves retrieving the manifest and downloading each of the image's layer blobs. This section outlines the general steps, followed by a working example. + + 1. [Get a bearer token for the repository](https://docs.docker.com/reference/api/registry/auth/). + 2. [Get the image manifest](#operation/GetImageManifest). + 3. If the response in the previous step is a multi-architecture manifest list, you must do the following: + - Parse the `manifests[]` array to locate the digest for your target platform (for example, `linux/amd64`). + - [Get the image manifest](#operation/GetImageManifest) using the located digest. + 4. [Check if the blob exists](#operation/CheckBlobExists) before downloading. The client should send a `HEAD` request for each layer digest. + 5. [Download each layer blob](#operation/GetBlob) using the digest obtained from the manifest. The client should send a `GET` request for each layer digest. + + The following bash script example pulls `library/ubuntu:latest` from Docker Hub.
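+
+    The script assumes `curl` and `jq` are installed and that the repository allows anonymous pulls.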
+ + ```bash + #!/bin/bash + + # Step 1: Get a bearer token + TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/ubuntu:pull" | jq -r .token) + + # Step 2: Get the image manifest. In this example, an image manifest list is returned. + curl -s -H "Authorization: Bearer $TOKEN" \ + -H "Accept: application/vnd.docker.distribution.manifest.list.v2+json" \ + https://registry-1.docker.io/v2/library/ubuntu/manifests/latest \ + -o manifest-list.json + + # Step 3a: Parse the `manifests[]` array to locate the digest for your target platform (e.g., `linux/amd64`). + IMAGE_MANIFEST_DIGEST=$(jq -r '.manifests[] | select(.platform.architecture == "amd64" and .platform.os == "linux") | .digest' manifest-list.json) + + # Step 3b: Get the platform-specific image manifest + curl -s -H "Authorization: Bearer $TOKEN" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + https://registry-1.docker.io/v2/library/ubuntu/manifests/$IMAGE_MANIFEST_DIGEST \ + -o manifest.json + + # Step 4: Send a HEAD request to check if the layer blob exists + DIGEST=$(jq -r '.layers[0].digest' manifest.json) + curl -I -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/$DIGEST + + # Step 5: Download the layer blob + curl -L -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/$DIGEST + ``` + + This example pulls the manifest and the first layer for the `ubuntu:latest` image on the `linux/amd64` platform. Repeat steps 4 and 5 for each digest in the `.layers[]` array in the manifest. + + + - name: push + x-displayName: Pushing Images + description: | + Pushing an image involves uploading any image blobs (such as the config or layers), and then uploading the manifest that references those blobs. + + This section outlines the basic steps to push an image using the registry API. + + 1. [Get a bearer token for the repository](https://docs.docker.com/reference/api/registry/auth/) + + 2. [Check if the blob exists](#operation/CheckBlobExists) using a `HEAD` request for each blob digest. + + 3. If the blob does not exist, [upload the blob](#operation/CompleteBlobUpload) using a monolithic `PUT` request: + - First, [initiate the upload](#operation/InitiateBlobUpload) with `POST`. + - Then [upload and complete](#operation/CompleteBlobUpload) with `PUT`. + + **Note**: Alternatively, you can upload the blob in multiple chunks by using `PATCH` requests to send each chunk, followed by a final `PUT` request to complete the upload. This is known as a [chunked upload](#operation/UploadBlobChunk) and is useful for large blobs or when resuming interrupted uploads. + + + 4. [Upload the image manifest](#operation/PutImageManifest) using a `PUT` request to associate the config and layers. + + The following bash script example pushes a dummy config blob and manifest to `yourusername/helloworld:latest` on Docker Hub. You can replace `yourusername` with your Docker Hub username and `dckr_pat` with your Docker Hub personal access token. 
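+
+    Like the pull example, this script assumes `curl`, `jq`, and `sha256sum` are available, and the personal access token must be allowed to push to the repository.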
+ + ```bash + #!/bin/bash + + USERNAME=yourusername + PASSWORD=dckr_pat + REPO=yourusername/helloworld + TAG=latest + CONFIG=config.json + MIME_TYPE=application/vnd.docker.container.image.v1+json + + # Step 1: Get a bearer token + TOKEN=$(curl -s -u "$USERNAME:$PASSWORD" \ + "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$REPO:push,pull" \ + | jq -r .token) + + # Create a dummy config blob and compute its digest + echo '{"architecture":"amd64","os":"linux","config":{},"rootfs":{"type":"layers","diff_ids":[]}}' > $CONFIG + DIGEST="sha256:$(sha256sum $CONFIG | awk '{print $1}')" + + # Step 2: Check if the blob exists + STATUS=$(curl -s -o /dev/null -w "%{http_code}" -I \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/$REPO/blobs/$DIGEST) + + if [ "$STATUS" != "200" ]; then + # Step 3: Upload blob using monolithic upload + LOCATION=$(curl -sI -X POST \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/$REPO/blobs/uploads/ \ + | grep -i Location | tr -d '\r' | awk '{print $2}') + + curl -s -X PUT "$LOCATION&digest=$DIGEST" \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/octet-stream" \ + --data-binary @$CONFIG + fi + + # Step 4: Upload the manifest that references the config blob + MANIFEST=$(cat <` header. + + x-codeSamples: + - lang: Bash + label: cURL + source: | + # GET a manifest (by tag or digest) + curl -H "Authorization: Bearer $TOKEN" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + https://registry-1.docker.io/v2/library/ubuntu/manifests/latest + parameters: + - name: name + in: path + required: true + description: Name of the target repository + example: library/ubuntu + schema: + type: string + - name: reference + in: path + required: true + description: Tag or digest of the target manifest + examples: + by-tag: + summary: Tag + value: latest + by-digest: + summary: Digest + value: sha256:abc123def456... + schema: + type: string + - name: Authorization + in: header + required: true + description: RFC7235-compliant authorization header (e.g., `Bearer `). + schema: + type: string + - name: Accept + in: header + required: false + description: | + Media type(s) the client supports for the manifest. + + The registry supports the following media types: + - application/vnd.docker.distribution.manifest.v2+json + - application/vnd.docker.distribution.manifest.list.v2+json + - application/vnd.oci.image.manifest.v1+json + - application/vnd.oci.image.index.v1+json + schema: + type: string + + responses: + "200": + description: Manifest fetched successfully. + headers: + Docker-Content-Digest: + description: Digest of the returned manifest content. + schema: + type: string + Content-Type: + description: Media type of the returned manifest. 
+ schema: + type: string + content: + application/vnd.docker.distribution.manifest.v2+json: + schema: + type: object + required: + - schemaVersion + - mediaType + - config + - layers + properties: + schemaVersion: + type: integer + example: 2 + mediaType: + type: string + example: application/vnd.docker.distribution.manifest.v2+json + config: + type: object + properties: + mediaType: + type: string + example: application/vnd.docker.container.image.v1+json + size: + type: integer + example: 7023 + digest: + type: string + example: sha256:a3f3e...c1234 + layers: + type: array + items: + type: object + properties: + mediaType: + type: string + example: application/vnd.docker.image.rootfs.diff.tar.gzip + size: + type: integer + example: 32654 + digest: + type: string + example: sha256:bcf2...78901 + examples: + docker-manifest: + summary: Docker image manifest (schema v2) + value: + { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": "sha256:123456abcdef..." + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": "sha256:abcdef123456..." + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": "sha256:7890abcdef12..." + } + ] + } + + "400": + description: Invalid name or reference. + "401": + description: Authentication required. + "403": + description: Access denied. + "404": + description: Repository or manifest not found. + "429": + description: Too many requests. + + + put: + tags: + - Manifests + summary: Put image manifest + operationId: PutImageManifest + description: | + Upload an image manifest for a given tag or digest. This operation registers a manifest in a repository, allowing it to be pulled using the specified reference. + + This endpoint is typically used after all layer and config blobs have been uploaded to the registry. + + The manifest must conform to the expected schema and media type. For Docker image manifest schema version 2, use: + `application/vnd.docker.distribution.manifest.v2+json` + + Requires authentication via a bearer token with `push` scope for the target repository. + x-codeSamples: + - lang: Bash + label: cURL + source: | + # PUT a manifest (tag = latest) + curl -X PUT \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/vnd.docker.distribution.manifest.v2+json" \ + --data-binary @manifest.json \ + https://registry-1.docker.io/v2/library/ubuntu/manifests/latest + parameters: + - name: name + in: path + required: true + description: Name of the target repository + example: library/ubuntu + schema: + type: string + - name: reference + in: path + required: true + description: Tag or digest to associate with the uploaded manifest + examples: + by-tag: + summary: Tag + value: latest + by-digest: + summary: Digest + value: sha256:abc123def456... + schema: + type: string + - name: Authorization + in: header + required: true + description: RFC7235-compliant authorization header (for example, `Bearer <token>`). + schema: + type: string + - name: Content-Type + in: header + required: true + description: Media type of the manifest being uploaded.
+ schema: + type: string + example: application/vnd.docker.distribution.manifest.v2+json + + requestBody: + required: true + content: + application/vnd.docker.distribution.manifest.v2+json: + schema: + type: object + required: + - schemaVersion + - mediaType + - config + - layers + properties: + schemaVersion: + type: integer + example: 2 + mediaType: + type: string + example: application/vnd.docker.distribution.manifest.v2+json + config: + type: object + required: + - mediaType + - size + - digest + properties: + mediaType: + type: string + example: application/vnd.docker.container.image.v1+json + size: + type: integer + example: 7023 + digest: + type: string + example: sha256:123456abcdef... + layers: + type: array + items: + type: object + required: + - mediaType + - size + - digest + properties: + mediaType: + type: string + example: application/vnd.docker.image.rootfs.diff.tar.gzip + size: + type: integer + example: 32654 + digest: + type: string + example: sha256:abcdef123456... + + examples: + sample-manifest: + summary: Sample Docker image manifest (schema v2) + value: + { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": "sha256:123456abcdef..." + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": "sha256:abcdef123456..." + } + ] + } + + responses: + "201": + description: Manifest created successfully. + headers: + Docker-Content-Digest: + description: Digest of the stored manifest. + schema: + type: string + example: sha256:abcdef123456... + Location: + description: Canonical location of the uploaded manifest. + schema: + type: string + example: /v2/library/ubuntu/manifests/latest + Content-Length: + description: Always zero. + schema: + type: integer + example: 0 + "400": + description: Invalid name, reference, or manifest. + "401": + description: Authentication required. + "403": + description: Access denied. + "404": + description: Repository not found. + "405": + description: Operation not allowed. + "429": + description: Too many requests. + head: + tags: + - Manifests + summary: Check if manifest exists + operationId: HeadImageManifest + description: | + Use this endpoint to verify whether a manifest exists by tag or digest. + + This is a lightweight operation that returns only headers (no body). It is useful for: + - Checking for the existence of a specific image version + - Determining the digest or size of a manifest before downloading or deleting + + This endpoint requires authentication with pull scope. + + parameters: + - name: name + in: path + required: true + description: Name of the Repository + example: library/ubuntu + schema: + type: string + - name: reference + in: path + required: true + description: Tag or digest to check + examples: + by-tag: + summary: Tag + value: latest + by-digest: + summary: Digest + value: sha256:abc123def456... + schema: + type: string + - name: Authorization + in: header + required: true + schema: + type: string + description: Bearer token for authentication + - name: Accept + in: header + required: false + schema: + type: string + example: application/vnd.docker.distribution.manifest.v2+json + description: | + Media type of the manifest to check. The response will match one of the accepted types. 
+ x-codeSamples: + - lang: Bash + label: cURL + source: | + # HEAD /v2/{name}/manifests/{reference} + curl -I \ + -H "Authorization: Bearer $TOKEN" \ + -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + https://registry-1.docker.io/v2/library/ubuntu/manifests/latest + responses: + "200": + description: Manifest exists. + headers: + Content-Length: + description: Size of the manifest in bytes + schema: + type: integer + example: 7082 + Docker-Content-Digest: + description: Digest of the manifest + schema: + type: string + example: sha256:abc123... + Content-Type: + description: Media type of the manifest + schema: + type: string + example: application/vnd.docker.distribution.manifest.v2+json + "404": + description: Manifest not found. + "401": + description: Authentication required. + "403": + description: Access denied. + "429": + description: Too many requests. + delete: + tags: + - Manifests + summary: Delete image manifest + operationId: DeleteImageManifest + description: | + Delete an image manifest from a repository by digest. + + Only untagged or unreferenced manifests can be deleted. If the manifest is still referenced by a tag or another image, the registry returns `403 Forbidden`. + + This operation requires `delete` access to the repository. + parameters: + - name: name + in: path + required: true + description: Name of the repository + example: yourusername/helloworld + schema: + type: string + - name: reference + in: path + required: true + description: Digest of the manifest to delete (for example, `sha256:...`) + example: sha256:abc123def456... + schema: + type: string + - name: Authorization + in: header + required: true + description: Bearer token with `delete` access + schema: + type: string + x-codeSamples: + - lang: Bash + label: cURL + source: | + # DELETE a manifest by digest + curl -X DELETE \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/yourusername/helloworld/manifests/sha256:abc123def456... + responses: + "202": + description: Manifest deleted successfully. No content returned. + "401": + description: Authentication required. + "403": + description: Access denied. The manifest might still be referenced. + "404": + description: Manifest or repository not found. + "405": + description: Only digest-based deletion is allowed. + "429": + description: Too many requests. + /v2/{name}/blobs/uploads/: + post: + tags: + - Blobs + summary: Initiate blob upload or attempt cross-repository blob mount + operationId: InitiateBlobUpload + description: | + Initiate an upload session for a blob (layer or config) in a repository. + + This is the first step in uploading a blob. It returns a `Location` URL where the blob can be uploaded using `PATCH` (chunked) or `PUT` (monolithic). + + Instead of uploading a blob, a client can attempt to mount a blob from another repository (if it has read access) by including the `mount` and `from` query parameters. + + If successful, the registry responds with `201 Created` and the blob is reused without re-upload. + + If the mount fails, the upload proceeds as usual and returns a `202 Accepted`. + + You must authenticate with `push` access to the target repository.
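Taken together, the manifest and blob endpoints form the standard push sequence. The following is a minimal sketch of a monolithic push, assuming `$TOKEN` carries `push` scope and that `layer.tar.gz` and `manifest.json` already exist locally. The header parsing is illustrative only, and a real `Location` value can already contain query parameters, in which case the digest is appended with `&` instead of `?`:

```bash
# 1. Initiate an upload session and capture the Location header.
LOCATION=$(curl -si -X POST \
  -H "Authorization: Bearer $TOKEN" \
  https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/ \
  | awk 'tolower($1) == "location:" { print $2 }' | tr -d '\r')

# 2. Upload the blob in one request, finalizing with its digest.
DIGEST="sha256:$(sha256sum layer.tar.gz | cut -d' ' -f1)"
curl -X PUT \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/octet-stream" \
  --data-binary @layer.tar.gz \
  "${LOCATION}?digest=${DIGEST}"

# 3. Once all blobs exist, register the manifest under a tag.
curl -X PUT \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/vnd.docker.distribution.manifest.v2+json" \
  --data-binary @manifest.json \
  https://registry-1.docker.io/v2/library/ubuntu/manifests/latest
```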
+ x-codeSamples: + - lang: Bash + label: cURL (Initiate Standard Upload) + source: | + # Initiate a standard blob upload session + curl -i -X POST \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/ + + - lang: Bash + label: cURL (Cross-Repository Blob Mount) + source: | + # Attempt a cross-repository blob mount + curl -i -X POST \ + -H "Authorization: Bearer $TOKEN" \ + "https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/?mount=sha256:abc123def456...&from=library/busybox" + + parameters: + - name: name + in: path + required: true + description: Name of the target repository + example: library/ubuntu + schema: + type: string + - name: mount + in: query + required: false + description: Digest of the blob to mount from another repository + schema: + type: string + example: sha256:abc123def456... + - name: from + in: query + required: false + description: Source repository to mount the blob from + schema: + type: string + example: library/busybox + - name: Authorization + in: header + required: true + schema: + type: string + description: Bearer token for authentication with `push` scope + + responses: + "201": + description: Blob successfully mounted from another repository. + headers: + Location: + description: URL where the mounted blob is accessible + schema: + type: string + example: /v2/library/ubuntu/blobs/sha256:abc123... + Docker-Content-Digest: + description: Canonical digest of the mounted blob + schema: + type: string + example: sha256:abc123... + Content-Length: + description: Always zero + schema: + type: integer + example: 0 + "202": + description: Upload initiated successfully (fallback if mount fails). + headers: + Location: + description: Upload location URL for `PATCH` or `PUT` requests + schema: + type: string + example: /v2/library/ubuntu/blobs/uploads/abc123 + Docker-Upload-UUID: + description: Server-generated UUID for the upload session + schema: + type: string + example: abc123 + Range: + description: Current upload byte range (typically `0-0` at init) + schema: + type: string + example: 0-0 + Content-Length: + description: Always zero + schema: + type: integer + example: 0 + "401": + description: Authentication required. + "403": + description: Access denied. + "404": + description: Repository not found. + "429": + description: Too many requests. + /v2/{name}/blobs/{digest}: + head: + tags: + - Blobs + summary: Check existence of blob + operationId: CheckBlobExists + description: | + Check whether a blob (layer or config) exists in the registry. + + This is useful before uploading a blob to avoid duplicates. + + If the blob is present, the registry returns a `200 OK` response with headers like `Content-Length` and `Docker-Content-Digest`. + + If the blob does not exist, the response is `404 Not Found`. + x-codeSamples: + - lang: Bash + label: cURL + source: | + # HEAD to check if a blob exists + curl -I \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/sha256:abc123... + parameters: + - name: name + in: path + required: true + description: Name of the repository + example: library/ubuntu + schema: + type: string + - name: digest + in: path + required: true + description: Digest of the blob + schema: + type: string + example: sha256:abc123def4567890... + - name: Authorization + in: header + required: true + description: Bearer token with pull or push scope + schema: + type: string + example: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6...
+ + responses: + "200": + description: Blob exists + headers: + Content-Length: + description: Size of the blob in bytes + schema: + type: integer + example: 32654 + Docker-Content-Digest: + description: Digest of the blob + schema: + type: string + example: sha256:abc123def4567890... + Content-Type: + description: MIME type of the blob content + schema: + type: string + example: application/octet-stream + content: + application/json: + examples: + blob-check-request: + summary: Sample request + value: + method: HEAD + url: /v2/library/ubuntu/blobs/sha256:abc123def4567890... + headers: + Authorization: Bearer <token> + Accept: '*/*' + blob-check-response: + summary: Sample 200 response headers + value: + status: 200 OK + headers: + Docker-Content-Digest: sha256:abc123def4567890... + Content-Length: 32654 + Content-Type: application/octet-stream + + "404": + description: Blob not found + "401": + description: Authentication required + "403": + description: Access denied + "429": + description: Too many requests + get: + tags: + - Blobs + summary: Retrieve blob + operationId: GetBlob + description: | + Download the blob identified by digest from the registry. + + Blobs include image layers and configuration objects. Clients must use the digest from the manifest to retrieve a blob. + + This endpoint might return a `307 Temporary Redirect` to a CDN or storage location. Clients must follow the redirect to obtain the actual blob content. + + The blob content is typically a gzipped tarball (for layers) or JSON (for configs). The MIME type is usually `application/octet-stream`. + x-codeSamples: + - lang: Bash + label: cURL + source: | + # GET (download) a blob + curl -L \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/sha256:abc123... \ + -o layer.tar.gz + parameters: + - name: name + in: path + required: true + description: Repository name + example: library/ubuntu + schema: + type: string + - name: digest + in: path + required: true + description: Digest of the blob + schema: + type: string + example: sha256:abc123def456... + - name: Authorization + in: header + required: true + schema: + type: string + description: Bearer token with pull scope + example: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6... + + responses: + "200": + description: Blob content returned directly + headers: + Content-Length: + description: Size of the blob in bytes + schema: + type: integer + example: 32768 + Content-Type: + description: MIME type of the blob + schema: + type: string + example: application/octet-stream + Docker-Content-Digest: + description: Digest of the returned blob + schema: + type: string + example: sha256:abc123def456... + content: + application/octet-stream: + schema: + type: string + format: binary + examples: + small-layer: + summary: Example binary blob (gzipped tar layer) + value: "" + + "307": + description: Temporary redirect to blob location + headers: + Location: + description: Redirect URL for blob download (for example, S3 or CDN) + schema: + type: string + example: https://cdn.docker.io/blobs/library/ubuntu/abc123... + "401": + description: Authentication required + "403": + description: Access denied + "404": + description: Blob not found + "429": + description: Too many requests + /v2/{name}/blobs/uploads/{uuid}: + get: + tags: + - Blobs + summary: Get blob upload status + operationId: GetBlobUploadStatus + description: | + Retrieve the current status of an in-progress blob upload.
+ + This is useful for: + - Resuming an interrupted upload + - Determining how many bytes have been accepted so far + - Retrying from the correct offset in chunked uploads + + The response includes the `Range` header indicating the byte range received so far, and a `Docker-Upload-UUID` for identifying the session. + x-codeSamples: + - lang: Bash + label: cURL + source: | + # GET upload status + curl -I \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/abc123 + parameters: + - name: name + in: path + required: true + description: Repository name + example: library/ubuntu + schema: + type: string + - name: uuid + in: path + required: true + description: Upload session UUID + schema: + type: string + example: abc123 + - name: Authorization + in: header + required: true + schema: + type: string + example: Bearer eyJhbGciOi... + + responses: + "204": + description: Upload in progress. No body is returned. + headers: + Range: + description: Current byte range uploaded (inclusive) + schema: + type: string + example: 0-16383 + Docker-Upload-UUID: + description: UUID of the upload session + schema: + type: string + example: abc123 + Location: + description: URL to continue or complete the upload + schema: + type: string + example: /v2/library/ubuntu/blobs/uploads/abc123 + "401": + description: Authentication required + "403": + description: Access denied + "404": + description: Upload session not found + "429": + description: Too many requests + + put: + tags: + - Blobs + summary: Complete blob upload + operationId: CompleteBlobUpload + description: | + Complete the upload of a blob by finalizing an upload session. + + This request must include the `digest` query parameter and optionally the last chunk of data. When the registry receives this request, it verifies the digest and stores the blob. + + This endpoint supports: + - Monolithic uploads (upload entire blob in this request) + - Finalizing chunked uploads (last chunk plus `digest`) + + x-codeSamples: + - lang: Bash + label: cURL + source: | + # PUT – complete upload (monolithic or final chunk) + curl -X PUT \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/octet-stream" \ + --data-binary @layer.tar.gz \ + "https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/abc123?digest=sha256:abcd1234..." + + + parameters: + - name: name + in: path + required: true + description: Repository name + schema: + type: string + example: library/ubuntu + - name: uuid + in: path + required: true + description: Upload session UUID returned from the POST request + schema: + type: string + example: abc123 + - name: digest + in: query + required: true + description: Digest of the uploaded blob + schema: + type: string + example: sha256:abcd1234... + - name: Authorization + in: header + required: true + schema: + type: string + example: Bearer eyJhbGciOi... + + requestBody: + required: false + content: + application/octet-stream: + schema: + type: string + format: binary + examples: + layer-upload: + summary: Layer tarball blob + value: "" + + responses: + "201": + description: Upload completed successfully + headers: + Docker-Content-Digest: + description: Canonical digest of the stored blob + schema: + type: string + example: sha256:abcd1234... + Location: + description: URL where the blob is now accessible + schema: + type: string + example: /v2/library/ubuntu/blobs/sha256:abcd1234...
+ Content-Length: + description: Always zero for completed uploads + schema: + type: integer + example: 0 + "400": + description: Invalid digest or missing parameters + "401": + description: Authentication required + "403": + description: Access denied + "404": + description: Upload session not found + "416": + description: Requested range not satisfiable (if used in chunked mode) + "429": + description: Too many requests + + patch: + tags: + - Blobs + summary: Upload blob chunk + operationId: UploadBlobChunk + description: | + Upload a chunk of a blob to an active upload session. + + Use this method for **chunked uploads**, especially for large blobs or when resuming interrupted uploads. + + The client sends binary data using `PATCH`, optionally including a `Content-Range` header. + + After each chunk is accepted, the registry returns a `202 Accepted` response with: + - `Range`: current byte range stored + - `Docker-Upload-UUID`: identifier for the upload session + - `Location`: URL to continue the upload or finalize with `PUT` + x-codeSamples: + - lang: Bash + label: cURL + source: | + # PATCH – upload a chunk (first 64 KiB) + curl -X PATCH \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/octet-stream" \ + --data-binary @chunk-0.bin \ + "https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/abc123" + parameters: + - name: name + in: path + required: true + description: Repository name + schema: + type: string + example: library/ubuntu + - name: uuid + in: path + required: true + description: Upload session UUID + schema: + type: string + example: abc123 + - name: Authorization + in: header + required: true + schema: + type: string + example: Bearer eyJhbGciOi... + - name: Content-Range + in: header + required: false + schema: + type: string + example: bytes 0-65535 + description: Optional. Byte range of the chunk being sent + + requestBody: + required: true + content: + application/octet-stream: + schema: + type: string + format: binary + examples: + chunk-0: + summary: Upload chunk 0 of a blob + value: "" + + responses: + "202": + description: Chunk accepted and stored + headers: + Location: + description: URL to continue or finalize the upload + schema: + type: string + example: /v2/library/ubuntu/blobs/uploads/abc123 + Range: + description: Byte range uploaded so far (inclusive) + schema: + type: string + example: 0-65535 + Docker-Upload-UUID: + description: Upload session UUID + schema: + type: string + example: abc123 + "400": + description: Malformed content or range + "401": + description: Authentication required + "403": + description: Access denied + "404": + description: Upload session not found + "416": + description: Range error (for example, chunk out of order) + "429": + description: Too many requests + delete: + tags: + - Blobs + summary: Cancel blob upload + operationId: CancelBlobUpload + description: | + Cancel an in-progress blob upload session. + + This operation discards any data that has been uploaded and invalidates the upload session. + + Use this when: + - An upload fails or is aborted mid-process + - The client wants to clean up unused upload sessions + + After cancellation, the UUID is no longer valid and a new `POST` must be issued to restart the upload.
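Before the cancel sample below, here is how the chunked operations in this section combine into a resumable upload. This is a minimal sketch under stated assumptions: a GNU userland (`split`, `stat -c%s`), an upload URL in `$UPLOAD_URL` from the initiation step, and a `Location` without existing query parameters (otherwise append the digest with `&`). If the upload is interrupted, the status endpoint's `Range` header tells you the offset to resume from.

```bash
# Split the blob into 64 KiB chunks and PATCH them in order.
split -b 65536 layer.tar.gz chunk-
OFFSET=0
for CHUNK in chunk-*; do
  SIZE=$(stat -c%s "$CHUNK")
  curl -s -X PATCH \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/octet-stream" \
    -H "Content-Range: bytes ${OFFSET}-$((OFFSET + SIZE - 1))" \
    --data-binary @"$CHUNK" \
    "$UPLOAD_URL"
  OFFSET=$((OFFSET + SIZE))
done

# Finalize the session; no body is needed once every chunk has been sent.
DIGEST="sha256:$(sha256sum layer.tar.gz | cut -d' ' -f1)"
curl -X PUT -H "Authorization: Bearer $TOKEN" "${UPLOAD_URL}?digest=${DIGEST}"
```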
+ + x-codeSamples: + - lang: Bash + label: cURL + source: | + # DELETE – cancel an upload session + curl -X DELETE \ + -H "Authorization: Bearer $TOKEN" \ + https://registry-1.docker.io/v2/library/ubuntu/blobs/uploads/abc123 + + parameters: + - name: name + in: path + required: true + description: Name of the repository + schema: + type: string + example: library/ubuntu + - name: uuid + in: path + required: true + description: Upload session UUID + schema: + type: string + example: abc123 + - name: Authorization + in: header + required: true + schema: + type: string + example: Bearer eyJhbGciOi... + + responses: + "204": + description: Upload session canceled successfully. No body is returned. + headers: + Content-Length: + description: Always zero + schema: + type: integer + example: 0 + "401": + description: Authentication required + "403": + description: Access denied + "404": + description: Upload session not found + "429": + description: Too many requests + + +x-tagGroups: + - name: General + tags: + - overview + - authentication + - pull + - push + - delete + - name: API + tags: + - Manifests + - Blobs diff --git a/content/reference/cli/docker/buildx/history/export.md b/content/reference/cli/docker/buildx/history/export.md new file mode 100644 index 000000000000..6f38ca206415 --- /dev/null +++ b/content/reference/cli/docker/buildx/history/export.md @@ -0,0 +1,16 @@ +--- +datafolder: buildx +datafile: docker_buildx_history_export +title: docker buildx history export +layout: cli +aliases: +- /engine/reference/commandline/buildx_history_export/ +--- + + diff --git a/content/reference/cli/docker/buildx/history/import.md b/content/reference/cli/docker/buildx/history/import.md new file mode 100644 index 000000000000..cd918893cdda --- /dev/null +++ b/content/reference/cli/docker/buildx/history/import.md @@ -0,0 +1,16 @@ +--- +datafolder: buildx +datafile: docker_buildx_history_import +title: docker buildx history import +layout: cli +aliases: +- /engine/reference/commandline/buildx_history_import/ +--- + + diff --git a/content/reference/cli/docker/buildx/history/trace.md b/content/reference/cli/docker/buildx/history/trace.md new file mode 100644 index 000000000000..4814477d6534 --- /dev/null +++ b/content/reference/cli/docker/buildx/history/trace.md @@ -0,0 +1,16 @@ +--- +datafolder: buildx +datafile: docker_buildx_history_trace +title: docker buildx history trace +layout: cli +aliases: +- /engine/reference/commandline/buildx_history_trace/ +--- + + diff --git a/content/reference/cli/docker/compose/bridge/_index.md b/content/reference/cli/docker/compose/bridge/_index.md new file mode 100644 index 000000000000..a076d5f7d302 --- /dev/null +++ b/content/reference/cli/docker/compose/bridge/_index.md @@ -0,0 +1,13 @@ +--- +datafolder: compose-cli +datafile: docker_compose_bridge +title: docker compose bridge +layout: cli +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/bridge/convert.md b/content/reference/cli/docker/compose/bridge/convert.md new file mode 100644 index 000000000000..c8310f0c0d70 --- /dev/null +++ b/content/reference/cli/docker/compose/bridge/convert.md @@ -0,0 +1,13 @@ +--- +datafolder: compose-cli +datafile: docker_compose_bridge_convert +title: docker compose bridge convert +layout: cli +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/bridge/transformations/_index.md b/content/reference/cli/docker/compose/bridge/transformations/_index.md new file mode 100644 index 000000000000..d7e61d411c9d ---
/dev/null +++ b/content/reference/cli/docker/compose/bridge/transformations/_index.md @@ -0,0 +1,13 @@ +--- +datafolder: compose-cli +datafile: docker_compose_bridge_transformations +title: docker compose bridge transformations +layout: cli +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/bridge/transformations/create.md b/content/reference/cli/docker/compose/bridge/transformations/create.md new file mode 100644 index 000000000000..efc0801f27ed --- /dev/null +++ b/content/reference/cli/docker/compose/bridge/transformations/create.md @@ -0,0 +1,13 @@ +--- +datafolder: compose-cli +datafile: docker_compose_bridge_transformations_create +title: docker compose bridge transformations create +layout: cli +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/bridge/transformations/list.md b/content/reference/cli/docker/compose/bridge/transformations/list.md new file mode 100644 index 000000000000..5af7198a69a7 --- /dev/null +++ b/content/reference/cli/docker/compose/bridge/transformations/list.md @@ -0,0 +1,13 @@ +--- +datafolder: compose-cli +datafile: docker_compose_bridge_transformations_list +title: docker compose bridge transformations list +layout: cli +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/config.md b/content/reference/cli/docker/compose/config.md index a36cce088952..60ed541a15e3 100644 --- a/content/reference/cli/docker/compose/config.md +++ b/content/reference/cli/docker/compose/config.md @@ -6,6 +6,7 @@ aliases: - /compose/reference/config/ - /engine/reference/commandline/compose_convert/ - /engine/reference/commandline/compose_config/ +- /compose/config/ layout: cli --- diff --git a/content/reference/cli/docker/compose/publish.md b/content/reference/cli/docker/compose/publish.md new file mode 100644 index 000000000000..78673b8020b1 --- /dev/null +++ b/content/reference/cli/docker/compose/publish.md @@ -0,0 +1,16 @@ +--- +datafolder: compose-cli +datafile: docker_compose_publish +title: docker compose publish +layout: cli +aliases: + - /reference/cli/docker/compose/alpha/publish/ + - /engine/reference/commandline/compose_alpha_publish/ +--- + + \ No newline at end of file diff --git a/content/reference/cli/docker/compose/alpha/publish.md b/content/reference/cli/docker/compose/volumes.md similarity index 68% rename from content/reference/cli/docker/compose/alpha/publish.md rename to content/reference/cli/docker/compose/volumes.md index 34d713568728..881d25490d28 100644 --- a/content/reference/cli/docker/compose/alpha/publish.md +++ b/content/reference/cli/docker/compose/volumes.md @@ -1,10 +1,8 @@ --- datafolder: compose-cli -datafile: docker_compose_alpha_publish -title: docker compose alpha publish +datafile: docker_compose_volumes +title: docker compose volumes layout: cli -aliases: -- /engine/reference/commandline/compose_alpha_publish/ --- +{{< summary-bar feature_name="Docker Init" >}} diff --git a/content/reference/cli/docker/model/_index.md b/content/reference/cli/docker/model/_index.md new file mode 100644 index 000000000000..57b18999588a --- /dev/null +++ b/content/reference/cli/docker/model/_index.md @@ -0,0 +1,14 @@ +--- +datafolder: model-cli +datafile: docker_model +title: docker model +layout: cli +--- + + diff --git a/content/reference/cli/docker/model/inspect.md b/content/reference/cli/docker/model/inspect.md new file mode 100644 index 000000000000..f0b638f51894 --- /dev/null +++ b/content/reference/cli/docker/model/inspect.md @@ -0,0 +1,16 @@ +--- 
+datafolder: model-cli +datafile: docker_model_inspect +title: docker model inspect +layout: cli +aliases: +- /engine/reference/commandline/model_inspect/ +--- + + diff --git a/content/reference/cli/docker/model/install-runner.md b/content/reference/cli/docker/model/install-runner.md new file mode 100644 index 000000000000..56c44bf91ba5 --- /dev/null +++ b/content/reference/cli/docker/model/install-runner.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_install-runner +title: docker model install-runner +layout: cli +aliases: +- /engine/reference/commandline/model_install-runner/ +--- + + diff --git a/content/reference/cli/docker/model/list.md b/content/reference/cli/docker/model/list.md new file mode 100644 index 000000000000..3ad4facdbfee --- /dev/null +++ b/content/reference/cli/docker/model/list.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_list +title: docker model list +layout: cli +aliases: +- /engine/reference/commandline/model_list/ +--- + + diff --git a/content/reference/cli/docker/model/logs.md b/content/reference/cli/docker/model/logs.md new file mode 100644 index 000000000000..6c684d1843f3 --- /dev/null +++ b/content/reference/cli/docker/model/logs.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_logs +title: docker model logs +layout: cli +aliases: +- /engine/reference/commandline/model_logs/ +--- + + diff --git a/content/reference/cli/docker/model/package.md b/content/reference/cli/docker/model/package.md new file mode 100644 index 000000000000..16015f63ef7a --- /dev/null +++ b/content/reference/cli/docker/model/package.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_package +title: docker model package +layout: cli +aliases: +- /engine/reference/commandline/model_package/ +--- + + diff --git a/content/reference/cli/docker/model/pull.md b/content/reference/cli/docker/model/pull.md new file mode 100644 index 000000000000..e6db51a8c34f --- /dev/null +++ b/content/reference/cli/docker/model/pull.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_pull +title: docker model pull +layout: cli +aliases: +- /engine/reference/commandline/model_pull/ +--- + + diff --git a/content/reference/cli/docker/model/push.md b/content/reference/cli/docker/model/push.md new file mode 100644 index 000000000000..d4ab1d7ed9df --- /dev/null +++ b/content/reference/cli/docker/model/push.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_push +title: docker model push +layout: cli +aliases: +- /engine/reference/commandline/model_push/ +--- + + diff --git a/content/reference/cli/docker/model/rm.md b/content/reference/cli/docker/model/rm.md new file mode 100644 index 000000000000..4eaefd048f8b --- /dev/null +++ b/content/reference/cli/docker/model/rm.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_rm +title: docker model rm +layout: cli +aliases: +- /engine/reference/commandline/model_rm/ +--- + + diff --git a/content/reference/cli/docker/model/run.md b/content/reference/cli/docker/model/run.md new file mode 100644 index 000000000000..83c820f4283b --- /dev/null +++ b/content/reference/cli/docker/model/run.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_run +title: docker model run +layout: cli +aliases: +- /engine/reference/commandline/model_run/ +--- + + diff --git a/content/reference/cli/docker/model/status.md b/content/reference/cli/docker/model/status.md new file mode 100644 index 000000000000..5ce4d3cd52a1 --- 
/dev/null +++ b/content/reference/cli/docker/model/status.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_status +title: docker model status +layout: cli +aliases: +- /engine/reference/commandline/model_status/ +--- + + diff --git a/content/reference/cli/docker/model/tag.md b/content/reference/cli/docker/model/tag.md new file mode 100644 index 000000000000..dd9ce9320b60 --- /dev/null +++ b/content/reference/cli/docker/model/tag.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_tag +title: docker model tag +layout: cli +aliases: +- /engine/reference/commandline/model_tag/ +--- + + diff --git a/content/reference/cli/docker/model/uninstall-runner.md b/content/reference/cli/docker/model/uninstall-runner.md new file mode 100644 index 000000000000..349541cd6f90 --- /dev/null +++ b/content/reference/cli/docker/model/uninstall-runner.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_uninstall-runner +title: docker model uninstall-runner +layout: cli +aliases: +- /engine/reference/commandline/model_uninstall-runner/ +--- + + diff --git a/content/reference/cli/docker/model/version.md b/content/reference/cli/docker/model/version.md new file mode 100644 index 000000000000..82f8cf035484 --- /dev/null +++ b/content/reference/cli/docker/model/version.md @@ -0,0 +1,16 @@ +--- +datafolder: model-cli +datafile: docker_model_version +title: docker model version +layout: cli +aliases: +- /engine/reference/commandline/model_version/ +--- + + diff --git a/content/reference/cli/docker/offload/_index.md b/content/reference/cli/docker/offload/_index.md new file mode 100644 index 000000000000..532a4edc59d6 --- /dev/null +++ b/content/reference/cli/docker/offload/_index.md @@ -0,0 +1,13 @@ +--- +datafolder: offload-cli +datafile: docker_offload +title: docker offload +layout: cli +params: + sidebar: + badge: + color: blue + text: Beta +--- + +{{< summary-bar feature_name="Docker Offload" >}} \ No newline at end of file diff --git a/content/reference/cli/docker/offload/accounts.md b/content/reference/cli/docker/offload/accounts.md new file mode 100644 index 000000000000..4d8fd8b3ee6b --- /dev/null +++ b/content/reference/cli/docker/offload/accounts.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_accounts +title: docker offload accounts +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/offload/diagnose.md b/content/reference/cli/docker/offload/diagnose.md new file mode 100644 index 000000000000..4adc30550bb2 --- /dev/null +++ b/content/reference/cli/docker/offload/diagnose.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_diagnose +title: docker offload diagnose +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/offload/start.md b/content/reference/cli/docker/offload/start.md new file mode 100644 index 000000000000..2269f47e8644 --- /dev/null +++ b/content/reference/cli/docker/offload/start.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_start +title: docker offload start +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/offload/status.md b/content/reference/cli/docker/offload/status.md new file mode 100644 index 000000000000..290101d50727 --- /dev/null +++ b/content/reference/cli/docker/offload/status.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_status 
+title: docker offload status +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/offload/stop.md b/content/reference/cli/docker/offload/stop.md new file mode 100644 index 000000000000..1a9af203efff --- /dev/null +++ b/content/reference/cli/docker/offload/stop.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_stop +title: docker offload stop +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/offload/version.md b/content/reference/cli/docker/offload/version.md new file mode 100644 index 000000000000..7f32e7ec23cd --- /dev/null +++ b/content/reference/cli/docker/offload/version.md @@ -0,0 +1,8 @@ +--- +datafolder: offload-cli +datafile: docker_offload_version +title: docker offload version +layout: cli +--- + +{{< summary-bar feature_name="Docker Offload" >}} diff --git a/content/reference/cli/docker/scout/integration/_index.md b/content/reference/cli/docker/scout/integration/_index.md index 55287abc2a8b..17e602764404 100644 --- a/content/reference/cli/docker/scout/integration/_index.md +++ b/content/reference/cli/docker/scout/integration/_index.md @@ -15,4 +15,4 @@ repository on GitHub: https://github.com/docker/scout-cli --> -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} diff --git a/content/reference/cli/docker/scout/integration/configure.md b/content/reference/cli/docker/scout/integration/configure.md index 71ebe8c58e59..200efd02ee0e 100644 --- a/content/reference/cli/docker/scout/integration/configure.md +++ b/content/reference/cli/docker/scout/integration/configure.md @@ -15,4 +15,4 @@ repository on GitHub: https://github.com/docker/scout-cli --> -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} diff --git a/content/reference/cli/docker/scout/integration/delete.md b/content/reference/cli/docker/scout/integration/delete.md index a63a11ec46b4..2de746e00289 100644 --- a/content/reference/cli/docker/scout/integration/delete.md +++ b/content/reference/cli/docker/scout/integration/delete.md @@ -15,4 +15,4 @@ repository on GitHub: https://github.com/docker/scout-cli --> -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} diff --git a/content/reference/cli/docker/scout/integration/list.md b/content/reference/cli/docker/scout/integration/list.md index 31f1a3ae2dff..4271284acb1b 100644 --- a/content/reference/cli/docker/scout/integration/list.md +++ b/content/reference/cli/docker/scout/integration/list.md @@ -15,4 +15,4 @@ repository on GitHub: https://github.com/docker/scout-cli --> -{{< include "scout-early-access.md" >}} +{{% include "scout-early-access.md" %}} diff --git a/content/reference/compose-file/_index.md b/content/reference/compose-file/_index.md index f29f59f40235..68b7abdf77bc 100644 --- a/content/reference/compose-file/_index.md +++ b/content/reference/compose-file/_index.md @@ -36,6 +36,7 @@ aliases: - /compose/yaml/ - /compose/compose-file/compose-file-v1/ - /compose/compose-file/ + - /compose/reference/overview/ --- >**New to Docker Compose?** @@ -44,10 +45,15 @@ aliases: The Compose Specification is the latest and recommended version of the Compose file format. It helps you define a [Compose file](/manuals/compose/intro/compose-application-model.md) which is used to configure your Docker application’s services, networks, volumes, and more. 
-Legacy versions 2.x and 3.x of the Compose file format were merged into the Compose Specification. It is implemented in versions 1.27.0 and above (also known as Compose V2) of the Docker Compose CLI. +Legacy versions 2.x and 3.x of the Compose file format were merged into the Compose Specification. It is implemented in versions 1.27.0 and above (also known as Compose v2) of the Docker Compose CLI. The Compose Specification on Docker Docs is the Docker Compose implementation. If you wish to implement your own version of the Compose Specification, see the [Compose Specification repository](https://github.com/compose-spec/compose-spec). Use the following links to navigate key sections of the Compose Specification. +> [!TIP] +> +> Want a better editing experience for Compose files in VS Code? +> Check out the [Docker VS Code Extension (Beta)](https://marketplace.visualstudio.com/items?itemName=docker.docker) for linting, code navigation, and vulnerability scanning. + {{< grid >}} diff --git a/content/reference/compose-file/build.md b/content/reference/compose-file/build.md index 636ab89ec233..903f714f7525 100644 --- a/content/reference/compose-file/build.md +++ b/content/reference/compose-file/build.md @@ -7,7 +7,7 @@ aliases: weight: 130 --- -{{< include "compose/build.md" >}} +{{% include "compose/build.md" %}} In the former case, the whole path is used as a Docker context to execute a Docker build, looking for a canonical `Dockerfile` at the root of the directory. The path can be absolute or relative. If it is relative, it is resolved @@ -49,7 +49,7 @@ services: When used to build service images from source, the Compose file creates three Docker images: -* `example/webapp`: A Docker image is built using `webapp` sub-directory, within the Compose file's parent folder, as the Docker build context. Lack of a `Dockerfile` within this folder throws an error. +* `example/webapp`: A Docker image is built using `webapp` sub-directory, within the Compose file's parent folder, as the Docker build context. Lack of a `Dockerfile` within this folder returns an error. * `example/database`: A Docker image is built using `backend` sub-directory within the Compose file parent folder. `backend.Dockerfile` file is used to define build steps, this file is searched relative to the context path, which means `..` resolves to the Compose file's parent folder, so `backend.Dockerfile` is a sibling file. * A Docker image is built using the `custom` directory with the user's `$HOME` as the Docker context. Compose displays a warning about the non-portable path used to build image. @@ -83,7 +83,7 @@ Alternatively `build` can be an object with fields defined as follows: ### `additional_contexts` -{{< introduced compose 2.17.0 "/manuals/compose/releases/release-notes.md#2170" >}} +{{< summary-bar feature_name="Build additional contexts" >}} `additional_contexts` defines a list of named contexts the image builder should use during image build. @@ -118,6 +118,28 @@ the unused contexts. Illustrative examples of how this is used in Buildx can be found [here](https://github.com/docker/buildx/blob/master/docs/reference/buildx_build.md#-additional-build-contexts---build-context). +`additional_contexts` can also refer to an image built by another service. +This allows a service image to be built using another service image as a base image, and to share +layers between service images. + +```yaml +services: + base: + build: + context: . + dockerfile_inline: | + FROM alpine + RUN ... + my-service: + build: + context: . 
+ dockerfile_inline: | + FROM base # image built for service base + RUN ... + additional_contexts: + base: service:base +``` + ### `args` `args` define build arguments, that is Dockerfile `ARG` values. @@ -229,7 +251,7 @@ build: ### `dockerfile_inline` -{{< introduced compose 2.17.0 "/manuals/compose/releases/release-notes.md#2170" >}} +{{< summary-bar feature_name="Build dockerfile inline" >}} `dockerfile_inline` defines the Dockerfile content as an inlined string in a Compose file. When set, the `dockerfile` attribute is not allowed and Compose rejects any Compose file having both set. @@ -246,7 +268,7 @@ build: ### `entitlements` -{{< introduced compose 2.27.1 "/manuals/compose/releases/release-notes.md#2271" >}} +{{< summary-bar feature_name="Build entitlements" >}} `entitlements` defines extra privileged entitlements to be allowed during the build. @@ -392,7 +414,7 @@ Composes reports an error in the following cases: ### `privileged` -{{< introduced compose 2.15.0 "/manuals/compose/releases/release-notes.md#2" >}} +{{< summary-bar feature_name="Build privileged" >}} `privileged` configures the service image to build with elevated privileges. Support and actual impacts are platform specific. @@ -444,8 +466,7 @@ The long syntax provides more granularity in how the secret is created within the service's containers. - `source`: The name of the secret as it exists on the platform. -- `target`: The name of the file to be mounted in `/run/secrets/` in the - service's task containers. Defaults to `source` if not specified. +- `target`: The ID of the secret as declared in the Dockerfile. Defaults to `source` if not specified. - `uid` and `gid`: The numeric uid or gid that owns the file within `/run/secrets/` in the service's task containers. Default value is `USER`. - `mode`: The [permissions](https://wintelguy.com/permissions-calc.pl) for the file to be mounted in `/run/secrets/` @@ -465,7 +486,7 @@ services: context: . secrets: - source: server-certificate - target: server.cert + target: cert # secret ID in Dockerfile uid: "103" gid: "103" mode: 0440 @@ -474,6 +495,12 @@ secrets: external: true ``` +```dockerfile +# Dockerfile +FROM nginx +RUN --mount=type=secret,id=cert,required=true,target=/root/cert ... +``` + Service builds may be granted access to multiple secrets. Long and short syntax for secrets may be used in the same Compose file. Defining a secret in the top-level `secrets` must not imply granting any service build access to it. Such grant must be explicit within service specification as [secrets](services.md#secrets) service element. @@ -555,7 +582,7 @@ build: ### `ulimits` -{{< introduced compose 2.23.1 "/manuals/compose/releases/release-notes.md#2231" >}} +{{< summary-bar feature_name="Build ulimits" >}} `ulimits` overrides the default `ulimits` for a container. It's specified either as an integer for a single limit or as mapping for soft/hard limits. diff --git a/content/reference/compose-file/configs.md b/content/reference/compose-file/configs.md index 1488b6a3f54c..99a22479e9ab 100644 --- a/content/reference/compose-file/configs.md +++ b/content/reference/compose-file/configs.md @@ -1,13 +1,13 @@ --- title: Configs top-level elements -description: Explore all the attributes the configs top-level element can have. +description: Manage and share configuration data using the configs element in Docker Compose. 
keywords: compose, compose specification, configs, compose file reference aliases: - /compose/compose-file/08-configs/ weight: 50 --- -{{< include "compose/configs.md" >}} +{{% include "compose/configs.md" %}} Services can only access configs when explicitly granted by a [`configs`](services.md#configs) attribute within the `services` top-level element. diff --git a/content/reference/compose-file/deploy.md b/content/reference/compose-file/deploy.md index c5a24f4d1b8d..b9b7f4bbd305 100644 --- a/content/reference/compose-file/deploy.md +++ b/content/reference/compose-file/deploy.md @@ -7,7 +7,7 @@ aliases: weight: 140 --- -{{< include "compose/deploy.md" >}} +{{% include "compose/deploy.md" %}} ## Attributes @@ -132,7 +132,7 @@ services: `resources` configures physical resource constraints for container to run on platform. Those constraints can be configured as: -- `limits`: The platform must prevent the container to allocate more. +- `limits`: The platform must prevent the container from allocating more resources. - `reservations`: The platform must guarantee the container can allocate at least the configured amount. ```yml @@ -254,11 +254,10 @@ deploy: - `on-failure`, the container is restarted if it exits due to an error, which manifests as a non-zero exit code. - `any` (default), containers are restarted regardless of the exit status. - `delay`: How long to wait between restart attempts, specified as a [duration](extension.md#specifying-durations). The default is 0, meaning restart attempts can occur immediately. -- `max_attempts`: How many times to attempt to restart a container before giving up (default: never give up). If the restart does not - succeed within the configured `window`, this attempt doesn't count toward the configured `max_attempts` value. - For example, if `max_attempts` is set to '2', and the restart fails on the first attempt, more than two restarts must be attempted. -- `window`: How long to wait before deciding if a restart has succeeded, specified as a [duration](extension.md#specifying-durations) (default: - decide immediately). +- `max_attempts`: The maximum number of failed restart attempts allowed before giving up. (Default: unlimited retries.) +A failed attempt only counts toward `max_attempts` if the container does not successfully restart within the time defined by `window`. +For example, if `max_attempts` is set to `2` and the container fails to restart within the window on the first try, Compose continues retrying until two such failed attempts occur, even if that means trying more than twice. +- `window`: The amount of time to wait after a restart to determine whether it was successful, specified as a [duration](extension.md#specifying-durations) (default: the result is evaluated immediately after the restart). ```yml deploy: @@ -271,7 +270,7 @@ deploy: ### `rollback_config` -`rollback_config` configures how the service should be rollbacked in case of a failing update. +`rollback_config` configures how the service should be rolled back in case of a failing update. - `parallelism`: The number of containers to rollback at a time. If set to 0, all containers rollback simultaneously. - `delay`: The time to wait between each container group's rollback (default 0s). 
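For instance, a `rollback_config` combining these attributes might look like the following sketch, with illustrative values:

```yml
deploy:
  rollback_config:
    parallelism: 2
    delay: 5s
```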
diff --git a/content/reference/compose-file/develop.md b/content/reference/compose-file/develop.md index 7bc771f103a2..6ad7de6e04aa 100644 --- a/content/reference/compose-file/develop.md +++ b/content/reference/compose-file/develop.md @@ -11,7 +11,7 @@ weight: 150 > > Develop is an optional part of the Compose Specification. It is available with Docker Compose version 2.22.0 and later. -{{< include "compose/develop.md" >}} +{{% include "compose/develop.md" %}} This page defines how Compose behaves to efficiently assist you and defines the development constraints and workflows set by Compose. Only a subset of Compose file services may require a `develop` subsection. @@ -43,8 +43,6 @@ services: ## Attributes - - The `develop` subsection defines configuration options that are applied by Compose to assist you during development of a service with optimized workflows. ### `watch` @@ -64,9 +62,9 @@ Compose to monitor source code for changes. For more information, see [Use Compo #### `exec` -{{< introduced compose 2.23.2 "/manuals/compose/releases/release-notes.md#2232" >}} +{{< summary-bar feature_name="Compose exec" >}} -`exec` is only relevant when `action` is set to `sync+exec`. Like [service hooks](services.md#post_start), `exec` is used to define the command to be run inside the container once it has started. +`exec` is only relevant when `action` is set to `sync+exec`. Like [service hooks](services.md#post_start), `exec` is used to define the command to be run inside the container once it has started. - `command`: Specifies the command to run once the container starts. This attribute is required, and you can choose to use either the shell form or the exec form. - `user`: The user to run the command. If not set, the command is run with the same user as the main service command. @@ -90,7 +88,7 @@ services: #### `ignore` -The `ignore` attribute can be used to define a list of patterns for paths to be ignored. Any updated file +The `ignore` attribute is used to define a list of patterns for paths to be ignored. Any updated file that matches a pattern, or belongs to a folder that matches a pattern, won't trigger services to be re-created. The syntax is the same as `.dockerignore` file: @@ -102,6 +100,30 @@ The syntax is the same as `.dockerignore` file: If the build context includes a `.dockerignore` file, the patterns in this file are loaded as implicit content for the `ignores` file, and values set in the Compose model are appended. +#### `include` + +It is sometimes easier to select files to be watched instead of declaring those that shouldn't be watched with `ignore`. + +The `include` attribute is used to define a pattern, or a list of patterns, for paths to be considered for watching. +Only files that match these patterns are considered when applying a watch rule. The syntax is the same as `ignore`. + +```yaml +services: + backend: + image: example/backend + develop: + watch: + # rebuild image and recreate service + - path: ./src + include: "*.go" + action: rebuild +``` + +> [!NOTE] +> +> In many cases `include` patterns start with a wildcard (`*`) character. This character has special meaning in YAML syntax, +> where it defines an [alias node](https://yaml.org/spec/1.2.2/#alias-nodes), so you have to wrap the pattern expression in quotes. + #### `path` `path` attribute defines the path to source code (relative to the project directory) to monitor for changes.
Updates to any file diff --git a/content/reference/compose-file/extension.md b/content/reference/compose-file/extension.md index dff790fcc8cd..b7dde48cd9df 100644 --- a/content/reference/compose-file/extension.md +++ b/content/reference/compose-file/extension.md @@ -1,13 +1,13 @@ --- title: Extensions -description: Understand how to use extensions +description: Define and reuse custom fragments with extensions in Docker Compose keywords: compose, compose specification, extensions, compose file reference aliases: - /compose/compose-file/11-extension/ weight: 80 --- -{{< include "compose/extension.md" >}} +{{% include "compose/extension.md" %}} Extensions can also be used with [anchors and aliases](fragments.md). @@ -117,7 +117,7 @@ services: > > In the example above, the environment variables are declared using the `FOO: BAR` mapping syntax, while the sequence syntax `- FOO=BAR` is only valid when no fragments are involved. -## Informative Historical Notes +## Informative historical notes This section is informative. At the time of writing, the following prefixes are known to exist: diff --git a/content/reference/compose-file/fragments.md b/content/reference/compose-file/fragments.md index e82e786614ea..d69e639a4546 100644 --- a/content/reference/compose-file/fragments.md +++ b/content/reference/compose-file/fragments.md @@ -1,13 +1,13 @@ --- title: Fragments -description: Understand how to use fragments +description: Reuse configuration with YAML anchors and fragments keywords: compose, compose specification, fragments, compose file reference aliases: - /compose/compose-file/10-fragments/ weight: 70 --- -{{< include "compose/fragments.md" >}} +{{% include "compose/fragments.md" %}} Anchors are created using the `&` sign. The sign is followed by an alias name. You can use this alias with the `*` sign later to reference the value following the anchor. Make sure there is no space between the `&` and the `*` characters and the following alias name. diff --git a/content/reference/compose-file/include.md b/content/reference/compose-file/include.md index 845916699032..f1a01d925318 100644 --- a/content/reference/compose-file/include.md +++ b/content/reference/compose-file/include.md @@ -1,29 +1,30 @@ --- -title: Include -description: Learn about include +linkTitle: Include +title: Use include to modularize Compose files +description: Reference external Compose files using the include top-level element keywords: compose, compose specification, include, compose file reference -aliases: +aliases: - /compose/compose-file/14-include/ weight: 110 --- -{{< introduced compose 2.20.0 "/manuals/compose/releases/release-notes.md#2200" >}} +{{< summary-bar feature_name="Composefile include" >}} -A Compose application can declare dependency on another Compose application. This is useful if: +You can reuse and modularize Docker Compose configurations by including other Compose files. This is useful if: - You want to reuse other Compose files. - You need to factor out parts of your application model into separate Compose files so they can be managed separately or shared with others. -- Teams need to keep a Compose file reasonably complicated for the limited amount of resources it has to declare for its own sub-domain within a larger deployment. +- Teams need to keep each Compose file focused on the resources it declares for its own sub-domain within a larger deployment.
The `include` top-level section is used to define the dependency on another Compose application, or sub-domain. -Each path listed in the `include` section is loaded as an individual Compose application model, with its own project directory, in order to resolve relative paths. +Each path listed in the `include` section is loaded as an individual Compose application model, with its own project directory, in order to resolve relative paths. -Once the included Compose application is loaded, all resource definitions are copied into the -current Compose application model. Compose displays a warning if resource names conflict and doesn't -try to merge them. To enforce this, `include` is evaluated after the Compose file(s) selected -to define the Compose application model have been parsed and merged, so that conflicts +Once the included Compose application is loaded, all resource definitions are copied into the +current Compose application model. Compose displays a warning if resource names conflict and doesn't +try to merge them. To enforce this, `include` is evaluated after the Compose file(s) selected +to define the Compose application model have been parsed and merged, so that conflicts between Compose files are detected. -`include` applies recursively so an included Compose file which declares its own `include` section triggers those other files to be included as well. +`include` applies recursively so an included Compose file which declares its own `include` section triggers those other files to be included as well. Any volumes, networks, or other resources pulled in from the included Compose file can be used by the current Compose application for cross-service references. For example: @@ -48,7 +49,7 @@ include: The short syntax only defines paths to other Compose files. The file is loaded with the parent folder as the project directory, and an optional `.env` file that is loaded to define any variables' default values -by interpolation. The local project's environment can override those values. +by interpolation. The local project's environment can override those values. ```yaml include: @@ -61,9 +62,9 @@ services: - included-service # defined by another_domain ``` -In the previous example, both `../commons/compose.yaml` and -`../another_domain/compose.yaml` are loaded as individual Compose projects. Relative paths -in Compose files being referred by `include` are resolved relative to their own Compose +In the previous example, both `../commons/compose.yaml` and +`../another_domain/compose.yaml` are loaded as individual Compose projects. Relative paths +in Compose files being referred by `include` are resolved relative to their own Compose file path, not based on the local project's directory. Variables are interpolated using values set in the optional `.env` file in same folder and are overridden by the local project's environment. @@ -90,21 +91,21 @@ local Compose model. ```yaml include: - - path: + - path: - ../commons/compose.yaml - ./commons-override.yaml ``` ### `project_directory` -`project_directory` defines a base path to resolve relative paths set in the Compose file. It defaults to +`project_directory` defines a base path to resolve relative paths set in the Compose file. It defaults to the directory of the included Compose file. ### `env_file` `env_file` defines an environment file(s) to use to define default values when interpolating variables -in the Compose file being parsed. It defaults to `.env` file in the `project_directory` for the Compose -file being parsed. 
+in the Compose file being parsed. It defaults to `.env` file in the `project_directory` for the Compose +file being parsed. `env_file` can be set to either a string or a list of strings when multiple environment files need to be merged to define a project environment. diff --git a/content/reference/compose-file/interpolation.md b/content/reference/compose-file/interpolation.md index b897bcd8923b..24727daf3f81 100644 --- a/content/reference/compose-file/interpolation.md +++ b/content/reference/compose-file/interpolation.md @@ -1,13 +1,13 @@ --- title: Interpolation -description: Learn about interpolation +description: Substitute environment variables in Docker Compose files using interpolation syntax. keywords: compose, compose specification, interpolation, compose file reference aliases: - /compose/compose-file/12-interpolation/ weight: 90 --- -{{< include "compose/interpolation.md" >}} +{{% include "compose/interpolation.md" %}} For braced expressions, the following formats are supported: - Direct substitution diff --git a/content/reference/compose-file/merge.md b/content/reference/compose-file/merge.md index 72ce75bb7891..3c1cb1f41164 100644 --- a/content/reference/compose-file/merge.md +++ b/content/reference/compose-file/merge.md @@ -1,13 +1,14 @@ --- -title: Merge -description: Learn about merging rules +linkTitle: Merge +title: Merge Compose files +description: Understand how Docker Compose merges multiple files and resolves conflicts keywords: compose, compose specification, merge, compose file reference aliases: - /compose/compose-file/13-merge/ weight: 100 --- -{{< include "compose/merge.md" >}} +{{% include "compose/merge.md" %}} These rules are outlined below. @@ -107,8 +108,8 @@ While these types are modeled in a Compose file as a sequence, they have special | Attribute | Unique key | |-------------|--------------------------| | volumes | target | -| secrets | source | -| configs | source | +| secrets | target | +| configs | target | | ports | {ip, target, published, protocol} | When merging Compose files, Compose appends new entries that do not violate a uniqueness constraint and merge entries that share a unique key. @@ -142,7 +143,7 @@ services: In addition to the previously described mechanism, an override Compose file can also be used to remove elements from your application model. For this purpose, the custom [YAML tag](https://yaml.org/spec/1.2.2/#24-tags) `!reset` can be set to -override a value set by the overriden Compose file. A valid value for attribute must be provided, +override a value set by the overridden Compose file. A valid value for attribute must be provided, but will be ignored and target attribute will be set with type's default value or `null`. For readability, it is recommended to explicitly set the attribute value to the null (`null`) or empty @@ -161,7 +162,7 @@ services: FOO: BAR ``` -And an `compose.override.yaml` file: +And a `compose.override.yaml` file: ```yaml services: @@ -182,9 +183,9 @@ services: ### Replace value -{{< introduced compose 2.24.4 "/manuals/compose/releases/release-notes.md#2244" >}} +{{< summary-bar feature_name="Compose replace file" >}} -While `!reset` can be used to remove a declaration from a Compose file using an override file, `!override` allows you +While `!reset` can be used to remove a declaration from a Compose file using an override file, `!override` allows you to fully replace an attribute, bypassing the standard merge rules. 
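When working with `!reset` and `!override`, it can help to inspect the result before running anything. As a usage sketch, `docker compose config` renders the fully merged and interpolated model, so you can confirm which values survived the merge:

```bash
# Print the merged model produced from a base file and an override file.
docker compose -f compose.yaml -f compose.override.yaml config
```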
A typical example is to fully replace a resource definition, relying on a distinct model but using the same name. A base `compose.yaml` file: @@ -194,7 +195,7 @@ services: app: image: myapp ports: - - "8080:80" + - "8080:80" ``` To remove the original port, but expose a new one, the following override file is used: diff --git a/content/reference/compose-file/models.md b/content/reference/compose-file/models.md new file mode 100644 index 000000000000..8331a57e2bcc --- /dev/null +++ b/content/reference/compose-file/models.md @@ -0,0 +1,65 @@ +--- +title: Models +description: Learn about the models top-level element +keywords: compose, compose specification, models, compose file reference +weight: 130 +--- + +{{< summary-bar feature_name="Compose models" >}} + +The top-level `models` section declares AI models that are used by your Compose application. These models are typically pulled as OCI artifacts, run by a model runner, and exposed as an API that your service containers can consume. + +Services can only access models when explicitly granted by a [`models` attribute](services.md#models) within the `services` top-level element. + +## Examples + +### Example 1 + +```yaml +services: + app: + image: app + models: + - ai_model + + +models: + ai_model: + model: ai/model +``` + +In this basic example: + + - The `app` service uses the `ai_model`. + - The `ai_model` is defined as an OCI artifact (`ai/model`) that is pulled and served by the model runner. + - Docker Compose injects connection info, for example `AI_MODEL_URL`, into the container. + +### Example 2 + +```yaml +services: + app: + image: app + models: + my_model: + endpoint_var: MODEL_URL + +models: + my_model: + model: ai/model + context_size: 1024 + runtime_flags: + - "--a-flag" + - "--another-flag=42" +``` + +In this advanced setup: + + - The `app` service references `my_model` using the long syntax. + - Compose injects the model runner's URL as the environment variable `MODEL_URL`. + +## Attributes + +- `model` (required): The OCI artifact identifier for the model. This is what Compose pulls and runs via the model runner. +- `context_size`: Defines the maximum token context size for the model. +- `runtime_flags`: A list of raw command-line flags passed to the inference engine when the model is started. \ No newline at end of file diff --git a/content/reference/compose-file/networks.md b/content/reference/compose-file/networks.md index 4812272295d4..f70cdbde6cd4 100644 --- a/content/reference/compose-file/networks.md +++ b/content/reference/compose-file/networks.md @@ -1,13 +1,14 @@ --- -title: Networks top-level elements -description: Explore all the attributes the networks top-level element can have. +linkTitle: Networks +title: Define and manage networks in Docker Compose +description: Learn how to configure and control networks using the top-level networks element in Docker Compose. keywords: compose, compose specification, networks, compose file reference aliases: - /compose/compose-file/06-networks/ weight: 30 --- -{{< include "compose/networks.md" >}} +{{% include "compose/networks.md" %}} To use a network across multiple services, you must explicitly grant each service access by using the [networks](services.md) attribute within the `services` top-level element. The `networks` top-level element has additional syntax that provides more granular control. @@ -60,7 +61,41 @@ networks: driver: custom-driver ``` -The advanced example shows a Compose file which defines two custom networks.
The `proxy` service is isolated from the `db` service, because they do not share a network in common. Only `app` can talk to both. +This example shows a Compose file that defines two custom networks. The `proxy` service is isolated from the `db` service, because they don't share a network. Only `app` can talk to both. + +## The default network + +When a Compose file doesn't declare explicit networks, Compose uses an implicit `default` network. Compose connects services without an explicit [`networks`](services.md#networks) declaration to this `default` network: + + +```yml +services: + some-service: + image: foo +``` +This example is actually equivalent to: + +```yml +services: + some-service: + image: foo + networks: + default: {} +networks: + default: {} +``` + +You can customize the `default` network with an explicit declaration: + +```yml +networks: + default: + name: a_network # Use a custom name + driver_opts: # Pass options to the driver for network creation + com.docker.network.bridge.host_binding_ipv4: 127.0.0.1 +``` + +For options, see the [Docker Engine docs](https://docs.docker.com/engine/network/drivers/bridge/#options). ## Attributes @@ -105,9 +140,28 @@ networks: attachable: true ``` +### `enable_ipv4` + +{{< summary-bar feature_name="Compose enable ipv4" >}} + +Set `enable_ipv4` to `false` to disable IPv4 address assignment. + +```yml + networks: + ip6net: + enable_ipv4: false + enable_ipv6: true +``` + ### `enable_ipv6` -`enable_ipv6` enables IPv6 networking. For an example, see step four of [Create an IPv6 network](/manuals/engine/daemon/ipv6.md). +`enable_ipv6` enables IPv6 address assignment. + +```yml + networks: + ip6net: + enable_ipv6: true +``` ### `external` @@ -117,7 +171,7 @@ Compose doesn't attempt to create these networks, and returns an error if one do - All other attributes apart from name are irrelevant. If Compose detects any other attribute, it rejects the Compose file as invalid. In the following example, `proxy` is the gateway to the outside world. Instead of attempting to create a network, Compose -queries the platform for an existing network simply called `outside` and connects the +queries the platform for an existing network called `outside` and connects the `proxy` service's containers to it. ```yml diff --git a/content/reference/compose-file/profiles.md b/content/reference/compose-file/profiles.md index 144c6fd75628..37acdcf00580 100644 --- a/content/reference/compose-file/profiles.md +++ b/content/reference/compose-file/profiles.md @@ -1,5 +1,6 @@ --- -title: Profiles +linkTitle: Profiles +title: Learn how to use profiles in Docker Compose description: Learn about profiles keywords: compose, compose specification, profiles, compose file reference aliases: @@ -52,7 +53,7 @@ services: In the above example: -- If the Compose application model is parsed with no profile enabled, it only contains the `web` service. +- If the Compose application model is parsed when no profile is enabled, it only contains the `web` service. - If the profile `test` is enabled, the model contains the services `test_lib` and `coverage_lib`, and service `web`, which is always enabled. - If the profile `debug` is enabled, the model contains both `web` and `debug_lib` services, but not `test_lib` and `coverage_lib`, and as such the model is invalid regarding the `depends_on` constraint of `debug_lib`.
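As a sketch of how these rules play out at runtime (assuming the Compose v2 CLI), profiles are enabled with the `--profile` flag:

```console
$ docker compose up -d                  # starts only web
$ docker compose --profile test up -d   # starts web, test_lib, and coverage_lib
```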
@@ -68,4 +69,4 @@ In the above example: profile `debug` is automatically enabled and service `test_lib` is pulled in as a dependency, starting both services `debug_lib` and `test_lib`. -See how you can use `profiles` in [Docker Compose](/manuals/compose/how-tos/profiles.md). +Learn how to use `profiles` in [Docker Compose](/manuals/compose/how-tos/profiles.md). diff --git a/content/reference/compose-file/secrets.md b/content/reference/compose-file/secrets.md index 5fe118b77b20..f198623a31eb 100644 --- a/content/reference/compose-file/secrets.md +++ b/content/reference/compose-file/secrets.md @@ -1,5 +1,5 @@ --- -title: Secrets top-level elements +title: Secrets description: Explore all the attributes the secrets top-level element can have. keywords: compose, compose specification, secrets, compose file reference aliases: diff --git a/content/reference/compose-file/services.md b/content/reference/compose-file/services.md index 56dae4b39209..2e9a14532858 100644 --- a/content/reference/compose-file/services.md +++ b/content/reference/compose-file/services.md @@ -1,5 +1,6 @@ --- -title: Services top-level elements +linkTitle: Services +title: Define services in Docker Compose description: Explore all the attributes the services top-level element can have. keywords: compose, compose specification, services, compose file reference aliases: @@ -7,7 +8,7 @@ aliases: weight: 20 --- -{{< include "compose/services.md" >}} +{{% include "compose/services.md" %}} A Compose file must declare a `services` top-level element as a map whose keys are string representations of service names, and whose values are service definitions. A service definition contains the configuration that is applied to each @@ -43,9 +44,9 @@ services: POSTGRES_DB: exampledb ``` -### Advanced example +### Advanced example -In the following example, the `proxy` service uses the Nginx image, mounts a local Nginx configuration file into the container, exposes port `80` and depends on the `backend` service. +In the following example, the `proxy` service uses the Nginx image, mounts a local Nginx configuration file into the container, exposes port `80`, and depends on the `backend` service. The `backend` service builds an image from the Dockerfile located in the `backend` directory that is set to build at stage `builder`. @@ -91,7 +92,7 @@ annotations: ### `attach` -{{< introduced compose 2.20.0 "/manuals/compose/releases/release-notes.md#2200" >}} +{{< summary-bar feature_name="Compose attach" >}} When `attach` is defined and set to `false`, Compose does not collect service logs until you explicitly request it to. @@ -233,7 +234,7 @@ cap_drop: ### `cgroup` -{{< introduced compose 2.15.0 "/manuals/compose/releases/release-notes.md#2150" >}} +{{< summary-bar feature_name="Compose cgroup" >}} `cgroup` specifies the cgroup namespace to join. When unset, it is the container runtime's decision to select which cgroup namespace to use, if supported. @@ -257,19 +258,24 @@ cgroup_parent: m-executor-abcd command: bundle exec thin -p 3000 ``` -The value can also be a list, in a manner similar to [Dockerfile](https://docs.docker.com/reference/dockerfile/#cmd): - -```yaml -command: [ "bundle", "exec", "thin", "-p", "3000" ] -``` - If the value is `null`, the default command from the image is used. If the value is `[]` (empty list) or `''` (empty string), the default command declared by the image is ignored, or in other words overridden to be empty.
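A minimal sketch of the `null` and empty overrides described above (service names are illustrative):

```yaml
services:
  keep-default:
    image: nginx
    command: null   # keep the image's default command
  no-command:
    image: nginx
    command: []     # override the default command to be empty
```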
+> [!NOTE] +> +> Unlike the `CMD` instruction in a Dockerfile, the `command` field doesn't automatically run within the context of the [`SHELL`](/reference/dockerfile.md#shell-form) instruction defined in the image. If your `command` relies on shell-specific features, such as environment variable expansion, you need to explicitly run it within a shell. For example: +> +> ```yaml +> command: /bin/sh -c 'echo "hello $$HOSTNAME"' +> ``` + +The value can also be a list, similar to the [exec-form syntax](/reference/dockerfile.md#exec-form) +used by the Dockerfile. + ### `configs` -`configs` let services adapt their behaviour without the need to rebuild a Docker image. +`configs` let services adapt their behaviour without the need to rebuild a Docker image. Services can only access configs when explicitly granted by the `configs` attribute. Two different syntax variants are supported. Compose reports an error if `config` doesn't exist on the platform or isn't defined in the @@ -282,7 +288,7 @@ You can grant a service access to multiple configs, and you can mix long and sho #### Short syntax The short syntax variant only specifies the config name. This grants the -container access to the config and mounts it as files into a service’s container’s filesystem. The location of the mount point within the container defaults to `/` in Linux containers, and `C:\` in Windows containers. +container access to the config and mounts it as files into a service’s container’s filesystem. The location of the mount point within the container defaults to `/` in Linux containers, and `C:\` in Windows containers. The following example uses the short syntax to grant the `redis` service access to the `my_config` and `my_other_config` configs. The value of @@ -371,7 +377,9 @@ credential_spec: When using `registry:`, the credential spec is read from the Windows registry on the daemon's host. A registry value with the given name must be located in: - HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs +```text +HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs +``` The following example loads the credential spec from a value named `my-credential-spec` in the registry: @@ -395,12 +403,12 @@ services: configs: my_credentials_spec: - file: ./my-credential-spec.json| + file: ./my-credential-spec.json ``` ### `depends_on` -{{< include "compose/services-depends-on.md" >}} +{{% include "compose/services-depends-on.md" %}} #### Short syntax @@ -491,7 +499,7 @@ Compose guarantees dependency services marked with ### `develop` -{{< introduced compose 2.22.0 "/manuals/compose/releases/release-notes.md#2220" >}} +{{< summary-bar feature_name="Compose develop" >}} `develop` specifies the development configuration for maintaining a container in sync with source, as defined in the [Development Section](develop.md). @@ -518,6 +526,13 @@ devices: - "/dev/sda:/dev/xvda:rwm" ``` +`devices` can also rely on the [CDI](https://github.com/cncf-tags/container-device-interface) syntax to let the container runtime select a device: + +```yml +devices: + - "vendor1.com/device=gpu" +``` + ### `dns` `dns` defines custom DNS servers to set on the container network interface configuration. It can be a single value or a list.
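For illustration, both forms (the resolver addresses are placeholders):

```yml
services:
  app:
    image: alpine
    dns: 8.8.8.8
  app-multi:
    image: alpine
    dns:
      - 8.8.8.8
      - 9.9.9.9
```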
@@ -562,7 +577,7 @@ dns_search: ### `driver_opts` -{{< introduced compose 2.27.1 "/manuals/compose/releases/release-notes.md#2271" >}} +{{< summary-bar feature_name="Compose driver opts" >}} `driver_opts` specifies a list of options as key-value pairs to pass to the driver. These options are driver-dependent. @@ -612,7 +627,7 @@ If the value is `[]` (empty list) or `''` (empty string), the default entrypoint ### `env_file` -{{< include "compose/services-env-file.md" >}} +{{% include "compose/services-env-file.md" %}} ```yml env_file: .env ``` @@ -638,7 +653,7 @@ attributes. #### `required` -{{< introduced compose 2.24.0 "/manuals/compose/releases/release-notes.md#2240" >}} +{{< summary-bar feature_name="Compose required" >}} The `required` attribute defaults to `true`. When `required` is set to `false` and the `.env` file is missing, Compose silently ignores the entry. @@ -652,11 +667,11 @@ env_file: #### `format` -{{< introduced compose 2.30.0 "/manuals/compose/releases/release-notes.md#2300" >}} +{{< summary-bar feature_name="Compose format" >}} The `format` attribute lets you use an alternative file format for the `env_file`. When not set, `env_file` is parsed according to the Compose rules outlined in [`Env_file` format](#env_file-format). -`raw` format lets you use an `env_file` with key=value items, but without any attempt from Compose to parse the value for interpolation. +`raw` format lets you use an `env_file` with key=value items, but without any attempt from Compose to parse the value for interpolation. This lets you pass values as-is, including quotes and `$` signs. ```yml @@ -704,7 +719,7 @@ VAR="quoted" ### `environment` -{{< include "compose/services-environment.md" >}} +{{% include "compose/services-environment.md" %}} Environment variables can be declared by a single key (no value after the equals sign). In this case Compose relies on you to resolve the value. If the value is not resolved, the variable @@ -748,7 +763,7 @@ expose: > [!NOTE] > -> If the Dockerfile for the image already exposes ports, it is visible to other containers on the network even if `expose` is not set in your Compose file. +> If the Dockerfile for the image already exposes ports, they are visible to other containers on the network even if `expose` is not set in your Compose file. ### `extends` @@ -766,11 +781,11 @@ extends: - `service`: Defines the name of the service being referenced as a base, for example `web` or `database`. - `file`: The location of a Compose configuration file defining that service. -When a service uses `extends`, it can also specify dependencies on other resources, an explicit `volumes` declaration for instance. However, it's important to note that `extends` does not automatically incorporate the target volume definition into the extending Compose file. Instead, you are responsible for ensuring that an equivalent resource exists for the service being extended to maintain consistency. Docker Compose verifies that a resource with the referenced ID is present within the Compose model. +#### Restrictions -Dependencies on other resources in an `extends` target can be: -- An explicit reference by `volumes`, `networks`, `configs`, `secrets`, `links`, `volumes_from` or `depends_on` -- A reference to another service using the `service:{name}` syntax in namespace declaration (`ipc`, `pid`, `network_mode`) +When a service is referenced using `extends`, it can declare dependencies on other resources.
These dependencies may be explicitly defined through attributes like `volumes`, `networks`, `configs`, `secrets`, `links`, `volumes_from`, or `depends_on`. Alternatively, dependencies can reference another service using the `service:{name}` syntax in namespace declarations such as `ipc`, `pid`, or `network_mode`. + +Compose does not automatically import these referenced resources into the extended model. It is your responsibility to ensure all required resources are explicitly declared in the model that relies on `extends`. Circular references with `extends` are not supported. Compose returns an error when one is detected. @@ -1000,6 +1015,29 @@ configuration, which means for Linux `/etc/hosts` get extra lines: ::1 myhostv6 ``` +### `gpus` + +{{< summary-bar feature_name="Compose gpus" >}} + +`gpus` specifies GPU devices to be allocated for container usage. This is equivalent to a [device request](deploy.md#devices) with +an implicit `gpu` capability. + +```yaml +services: + model: + gpus: + - driver: 3dfx + count: 2 +``` + +`gpus` can also be set to the string `all` to allocate all available GPU devices to the container. + +```yaml +services: + model: + gpus: all +``` + ### `group_add` `group_add` specifies additional groups, by name or number, which the user inside the container must be a member of. @@ -1021,7 +1059,7 @@ been the case if `group_add` were not declared. ### `healthcheck` -{{< include "compose/services-healthcheck.md" >}} +{{% include "compose/services-healthcheck.md" %}} For more information on `HEALTHCHECK`, see the [Dockerfile reference](/reference/dockerfile.md#healthcheck). @@ -1153,7 +1191,7 @@ results in a runtime error. ### `label_file` -{{< introduced compose 2.23.2 "/manuals/compose/releases/release-notes.md#2232" >}} +{{< summary-bar feature_name="Compose label file" >}} The `label_file` attribute lets you load labels for a service from an external file or a list of files. This provides a convenient way to manage multiple labels without cluttering the Compose file. @@ -1162,10 +1200,10 @@ The file uses a key-value format, similar to `env_file`. You can specify multipl ```yaml services: one: - label_file: ./app.labels - + label_file: ./app.labels + two: - label_file: + label_file: - ./app.labels - ./additional.labels ``` @@ -1253,14 +1291,49 @@ There is a performance penalty for applications that swap memory to disk often. - If `memswap_limit` is unset, and `memory` is set, the container can use as much swap as the `memory` setting, if the host container has swap memory configured. For instance, if `memory`="300m" and `memswap_limit` is not set, the container can use 600m in total of memory and swap. - If `memswap_limit` is explicitly set to -1, the container is allowed to use unlimited swap, up to the amount available on the host system. +### `models` + +{{< summary-bar feature_name="Compose models" >}} + +`models` defines which AI models the service should use at runtime. Each referenced model must be defined under the [`models` top-level element](models.md). + +```yaml +services: + short_syntax: + image: app + models: + - my_model + long_syntax: + image: app + models: + my_model: + endpoint_var: MODEL_URL + model_var: MODEL +``` + +When a service is linked to a model, Docker Compose injects environment variables to pass connection details and model identifiers to the container. This allows the application to locate and communicate with the model dynamically at runtime, without hard-coding values.
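As an illustrative check (the printed values are hypothetical), you could confirm the variables injected for the `long_syntax` service above:

```console
$ docker compose exec long_syntax env | grep MODEL
MODEL_URL=http://model-runner.internal/engines/v1
MODEL=ai/model
```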
+ +#### Long syntax + +The long syntax gives you more control over the environment variable names. + +- `endpoint_var` sets the name of the environment variable that holds the model runner’s URL. +- `model_var` sets the name of the environment variable that holds the model identifier. + +If either is omitted, Compose automatically generates the environment variable names based on the model key using the following rules: + + - Convert the model key to uppercase + - Replace any '-' characters with '_' + - Append `_URL` for the endpoint variable + ### `network_mode` -`network_mode` sets a service container's network mode. +`network_mode` sets a service container's network mode. - `none`: Turns off all container networking. - `host`: Gives the container raw access to the host's network interface. -- `service:{name}`: Gives the container access to the specified container by referring to its service name. -- `container:{name}`: Gives the container access to the specified container by referring to its container ID. +- `service:{name}`: Gives the container access to the specified container by referring to its service name. +- `container:{name}`: Gives the container access to the specified container by referring to its container ID. For more information about container networks, see the [Docker Engine documentation](/manuals/engine/network/_index.md#container-networks). @@ -1275,7 +1348,7 @@ Compose file containing both attributes. ### `networks` -{{< include "compose/services-networks.md" >}} +{{% include "compose/services-networks.md" %}} ```yml services: some-service: networks: - some-network - other-network ``` For more information about the `networks` top-level element, see [Networks](networks.md). +#### Implicit default network + +If `networks` is empty or absent from the Compose file, Compose considers an implicit definition for the service to be +connected to the `default` network: + +```yml +services: + some-service: + image: foo +``` +This example is actually equivalent to: + +```yml +services: + some-service: + image: foo + networks: + default: {} +``` + +If you don't want the service to be connected to a network, you must set [`network_mode: none`](#network_mode). + #### `aliases` `aliases` declares alternative hostnames for the service on the network. Other containers on the same @@ -1338,9 +1433,31 @@ services: - mysql networks: - front-tier: - back-tier: - admin: + front-tier: {} + back-tier: {} + admin: {} ``` + +### `interface_name` + +{{< summary-bar feature_name="Compose interface-name" >}} + +`interface_name` lets you specify the name of the network interface used to connect a service to a given network. This ensures consistent and predictable interface naming across services and networks. + +```yaml +services: + backend: + image: alpine + command: ip link show + networks: + back-tier: + interface_name: eth0 +``` + +Running the example Compose application shows: + +```console +backend-1 | 11: eth0@if64: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP ``` #### `ipv4_address`, `ipv6_address` @@ -1393,16 +1510,50 @@ networks: #### `mac_address` -{{< introduced compose 2.23.2 "/manuals/compose/releases/release-notes.md#2232" >}} +{{< summary-bar feature_name="Compose mac address" >}} `mac_address` sets the MAC address used by the service container when connecting to this particular network. +#### `gw_priority` + +{{< summary-bar feature_name="Compose gw priority" >}} + +The network with the highest `gw_priority` is selected as the default gateway for the service container. +If unspecified, the default value is 0.
+ +In the following example, `app_net_2` is selected as the default gateway. + +```yaml +services: + app: + image: busybox + command: top + networks: + app_net_1: + app_net_2: + gw_priority: 1 + app_net_3: +networks: + app_net_1: + app_net_2: + app_net_3: +``` + #### `priority` `priority` indicates in which order Compose connects the service’s containers to its networks. If unspecified, the default value is 0. -In the following example, the app service connects to `app_net_1` first as it has the highest priority. It then connects to `app_net_3`, then `app_net_2`, which uses the default priority value of 0. +If the container runtime accepts a `mac_address` attribute at service level, it is +applied to the network with the highest `priority`. In other cases, use the +`networks.mac_address` attribute. + +`priority` does not affect which network is selected as the default gateway. Use the +[`gw_priority`](#gw_priority) attribute instead. + +`priority` does not control the order in which network connections are added to +the container, and it cannot be used to determine the device name (`eth0` and so on) in the +container. ```yaml services: @@ -1464,7 +1615,7 @@ platform: linux/arm64/v8 ### `ports` -{{< include "compose/services-ports.md" >}} +{{% include "compose/services-ports.md" %}} > [!NOTE] > @@ -1477,11 +1628,11 @@ in the form: `[HOST:]CONTAINER[/PROTOCOL]` where: -- `HOST` is `[IP:](port | range)` (optional). If it is not set, it binds to all network interfaces (`0.0.0.0`). +- `HOST` is `[IP:](port | range)` (optional). If it is not set, it binds to all network interfaces (`0.0.0.0`). - `CONTAINER` is `port | range`. -- `PROTOCOL` restricts ports to a specified protocol either `tcp` or `upd`(optional). Default is `tcp`. +- `PROTOCOL` restricts ports to a specified protocol, either `tcp` or `udp` (optional). Default is `tcp`. -Ports can be either a single value or a range. `HOST` and `CONTAINER` must use equivalent ranges. +Ports can be either a single value or a range. `HOST` and `CONTAINER` must use equivalent ranges. You can either specify both ports (`HOST:CONTAINER`), or just the container port. In the latter case, the container runtime automatically allocates any unassigned port of the host. @@ -1502,10 +1653,10 @@ ports: - "49100:22" - "8000-9000:80" - "127.0.0.1:8001:8001" - - "127.0.0.1:5000-5010:5000-5010" - - "::1:6000:6000" - - "[::1]:6001:6001" - - "6060:6060/udp" + - "127.0.0.1:5000-5010:5000-5010" + - "::1:6000:6000" + - "[::1]:6001:6001" + - "6060:6060/udp" ``` > [!NOTE] @@ -1524,7 +1675,7 @@ expressed in the short form. - `protocol`: The port protocol (`tcp` or `udp`). Defaults to `tcp`. - `app_protocol`: The application protocol (TCP/IP level 4 / OSI level 7) this port is used for. This is optional and can be used as a hint for Compose to offer richer behavior for protocols that it understands. Introduced in Docker Compose version [2.26.0](/manuals/compose/releases/release-notes.md#2260). - `mode`: Specifies how the port is published in a Swarm setup. If set to `host`, it publishes the port on every node in Swarm. If set to `ingress`, it allows load balancing across the nodes in Swarm. Defaults to `ingress`. -- `name`: A human-readable name for the port, used to document it's usage within the service. +- `name`: A human-readable name for the port, used to document its usage within the service.
```yml ports: @@ -1547,7 +1698,7 @@ ports: ### `post_start` -{{< introduced compose 2.30.0 "../../manuals/compose/releases/release-notes.md#2300" >}} +{{< summary-bar feature_name="Compose post start" >}} `post_start` defines a sequence of lifecycle hooks to run after a container has started. The exact timing of when the command is run is not guaranteed. @@ -1572,7 +1723,7 @@ For more information, see [Use lifecycle hooks](/manuals/compose/how-tos/lifecyc ### `pre_stop` -{{< introduced compose 2.30.0 "../../manuals/compose/releases/release-notes.md#2300" >}} +{{< summary-bar feature_name="Compose pre stop" >}} `pre_stop` defines a sequence of lifecycle hooks to run before the container is stopped. These hooks won't run if the container stops by itself or is terminated suddenly. @@ -1602,17 +1753,67 @@ services: - debug ``` +### `provider` + +{{< summary-bar feature_name="Compose provider services" >}} + +`provider` can be used to define a service that Compose won't manage directly. Compose delegates the service lifecycle to a dedicated or third-party component. + +```yaml + database: + provider: + type: awesomecloud + options: + type: mysql + foo: bar + app: + image: myapp + depends_on: + - database +``` + +When Compose runs the application, the `awesomecloud` binary is used to manage the `database` service setup. +Dependent service `app` receives additional environment variables prefixed by the service name so it can access the resource. + +For illustration, assuming the `awesomecloud` execution produces the variables `URL` and `API_KEY`, the `app` service +runs with environment variables `DATABASE_URL` and `DATABASE_API_KEY`. + +When Compose stops the application, the `awesomecloud` binary is used to manage the `database` service teardown. + +The mechanism used by Compose to delegate the service lifecycle to an external binary is described in the [Compose extensions documentation](https://github.com/docker/compose/tree/main/docs/extension.md). + +For more information on using the `provider` attribute, see [Use provider services](/manuals/compose/how-tos/provider-services.md). + +#### `type` + +The `type` attribute is required. It defines the external component used by Compose to manage setup and teardown lifecycle +events. + +#### `options` + +`options` are specific to the selected provider and are not validated by the Compose Specification. + ### `pull_policy` `pull_policy` defines the decisions Compose makes when it starts to pull images. Possible values are: -* `always`: Compose always pulls the image from the registry. -* `never`: Compose doesn't pull the image from a registry and relies on the platform cached image. +- `always`: Compose always pulls the image from the registry. +- `never`: Compose doesn't pull the image from a registry and relies on the platform cached image. If there is no cached image, a failure is reported. -* `missing`: Compose pulls the image only if it's not available in the platform cache. +- `missing`: Compose pulls the image only if it's not available in the platform cache. This is the default option if you are not also using the [Compose Build Specification](build.md). `if_not_present` is considered an alias for this value for backward compatibility. -* `build`: Compose builds the image. Compose rebuilds the image if it's already present. +- `build`: Compose builds the image. Compose rebuilds the image if it's already present. +- `daily`: Compose checks the registry for image updates if the last pull took place more than 24 hours ago.
+- `weekly`: Compose checks the registry for image updates if the last pull took place more than 7 days ago. +- `every_<duration>`: Compose checks the registry for image updates if the last pull took place before `<duration>`. Duration can be expressed in weeks (`w`), days (`d`), hours (`h`), minutes (`m`), seconds (`s`), or a combination of these. + +```yaml +services: + test: + image: nginx + pull_policy: every_12h +``` ### `read_only` @@ -1663,7 +1864,7 @@ When both are set, `scale` must be consistent with the `replicas` attribute in t ### `secrets` -{{< include "compose/services-secrets.md" >}} +{{% include "compose/services-secrets.md" %}} Two different syntax variants are supported: the short syntax and the long syntax. Long and short syntax for secrets may be used in the same Compose file. @@ -1708,7 +1909,9 @@ the service's containers. - `mode`: The [permissions](https://wintelguy.com/permissions-calc.pl) for the file to be mounted in `/run/secrets/` in the service's task containers, in octal notation. The default value is world-readable permissions (mode `0444`). - The writable bit must be ignored if set. The executable bit may be set. + The writable bit must be ignored if set. The executable bit may be set. + +Note that support for the `uid`, `gid`, and `mode` attributes is not implemented in Docker Compose when the source of the secret is a [`file`](secrets.md). This is because bind-mounts used under the hood don't allow uid remapping. The following example sets the name of the `server-certificate` secret file to `server.cert` within the container, sets the mode to `0440` (group-readable), and sets the user and group @@ -1724,7 +1927,7 @@ services: target: server.cert uid: "103" gid: "103" - mode: "0440" + mode: 0o440 secrets: server-certificate: file: ./server.cert @@ -1749,7 +1952,7 @@ It's specified as a [byte value](extension.md#specifying-byte-values). ### `stdin_open` -`stdin_open` configures a service's container to run with an allocated stdin. This is the same as running a container with the +`stdin_open` configures a service's container to run with an allocated stdin. This is the same as running a container with the `-i` flag. For more information, see [Keep stdin open](/reference/cli/docker/container/run.md#interactive). Supported values are `true` or `false`. @@ -1836,7 +2039,7 @@ services: ### `tty` -`tty` configures a service's container to run with a TTY. This is the same as running a container with the +`tty` configures a service's container to run with a TTY. This is the same as running a container with the `-t` or `--tty` flag. For more information, see [Allocate a pseudo-TTY](/reference/cli/docker/container/run.md#tty). Supported values are `true` or `false`. @@ -1854,6 +2057,12 @@ ulimits: hard: 40000 ``` +### `use_api_socket` + +When `use_api_socket` is set, the container can interact with the underlying container engine through the API socket. +Your credentials are mounted inside the container so the container acts as a pure delegate for your commands relating to the container engine. +Typically, commands run by the container can `pull` from and `push` to your registry. + ### `user` `user` overrides the user used to run the container process. The default is set by the image, for example Dockerfile `USER`. If it's not set, then `root`.
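A short sketch (the UID and GID values are arbitrary examples):

```yml
services:
  app:
    image: alpine
    user: "1000:1000"  # run the container process as UID 1000, GID 1000
```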
@@ -1869,7 +2078,7 @@ userns_mode: "host" ### `uts` -{{< introduced compose 2.15.1 "/manuals/compose/releases/release-notes.md#2151" >}} +{{< summary-bar feature_name="Compose uts" >}} `uts` configures the UTS namespace mode set for the service container. When unspecified, it is the runtime's decision to assign a UTS namespace, if supported. Available values are: @@ -1882,7 +2091,7 @@ it is the runtime's decision to assign a UTS namespace, if supported. Available ### `volumes` -{{< include "compose/services-volumes.md" >}} +{{% include "compose/services-volumes.md" %}} The following example shows a named volume (`db-data`) being used by the `backend` service, and a bind mount defined for a single service. @@ -1932,22 +2141,25 @@ The short syntax uses a single string with colon-separated values to specify a v > platform it rejects Compose files which use relative host paths with an error. To avoid ambiguities > with named volumes, relative paths should always begin with `.` or `..`. +> [!NOTE] +> +> For bind mounts, the short syntax creates a directory at the source path on the host if it doesn't exist. This is for backward compatibility with legacy `docker-compose`. +> You can prevent this by using the long syntax and setting `create_host_path` to `false`. + #### Long syntax The long form syntax lets you configure additional fields that can't be expressed in the short form. -- `type`: The mount type. Either `volume`, `bind`, `tmpfs`, `npipe`, or `cluster` -- `source`: The source of the mount, a path on the host for a bind mount, or the +- `type`: The mount type. Either `volume`, `bind`, `tmpfs`, `image`, `npipe`, or `cluster`. +- `source`: The source of the mount, a path on the host for a bind mount, a Docker image reference for an image mount, or the name of a volume defined in the [top-level `volumes` key](volumes.md). Not applicable for a tmpfs mount. - `target`: The path in the container where the volume is mounted. - `read_only`: Flag to set the volume as read-only. - `bind`: Used to configure additional bind options: - `propagation`: The propagation mode used for the bind. - - `create_host_path`: Creates a directory at the source path on host if there is nothing present. - Compose does nothing if there is something present at the path. This is automatically implied by short syntax - for backward compatibility with `docker-compose` legacy. + - `create_host_path`: Creates a directory at the source path on host if there is nothing present. Defaults to `true`. - `selinux`: The SELinux re-labeling option `z` (shared) or `Z` (private) - `volume`: Configures additional volume options: - `nocopy`: Flag to disable copying of data from a container when a volume is created. @@ -1955,12 +2167,14 @@ expressed in the short form. - `tmpfs`: Configures additional tmpfs options: - `size`: The size for the tmpfs mount in bytes (either numeric or as bytes unit). - `mode`: The file mode for the tmpfs mount as Unix permission bits as an octal number. Introduced in Docker Compose version [2.14.0](/manuals/compose/releases/release-notes.md#2140). +- `image`: Configures additional image options: + - `subpath`: Path inside the source image to mount instead of the image root. Available in [Docker Compose version 2.35.0](/manuals/compose/releases/release-notes.md#2350) - `consistency`: The consistency requirements of the mount. Available values are platform specific. > [!TIP] > -> Working with large repositories or monorepos, or with virtual file systems that are no longer scaling with your codebase?
-> Compose now takes advantage of [Synchronized file shares](/manuals/desktop/features/synchronized-file-sharing.md) and automatically creates file shares for bind mounts. +> Working with large repositories or monorepos, or with virtual file systems that are no longer scaling with your codebase? +> Compose now takes advantage of [Synchronized file shares](/manuals/desktop/features/synchronized-file-sharing.md) and automatically creates file shares for bind mounts. > Ensure you're signed in to Docker with a paid subscription and have enabled both **Access experimental features** and **Manage Synchronized file shares with Compose** in Docker Desktop's settings. ### `volumes_from` diff --git a/content/reference/compose-file/version-and-name.md b/content/reference/compose-file/version-and-name.md index cb5bd689ab3d..aaf992c56b65 100644 --- a/content/reference/compose-file/version-and-name.md +++ b/content/reference/compose-file/version-and-name.md @@ -9,10 +9,11 @@ weight: 10 ## Version top-level element (obsolete) -The top-level `version` property is defined by the Compose Specification for backward compatibility. It is only informative and you'll receive a warning message that it is obsolete if used. +> [!IMPORTANT] +> +> The top-level `version` property is defined by the Compose Specification for backward compatibility. It is only informative. If you use it, you'll receive a warning message that it is obsolete. -Compose doesn't use `version` to select an exact schema to validate the Compose file, but -prefers the most recent schema when it's implemented. +Compose always uses the most recent schema to validate the Compose file, regardless of the `version` field. Compose validates whether it can fully parse the Compose file. If some fields are unknown, typically because the Compose file was written with fields defined by a newer version of the Specification, you'll receive a warning message. @@ -20,6 +21,7 @@ because the Compose file was written with fields defined by a newer version of t ## Name top-level element The top-level `name` property is defined by the Compose Specification as the project name to be used if you don't set one explicitly. + Compose offers a way for you to override this name, and sets a default project name to be used if the top-level `name` element is not set. diff --git a/content/reference/compose-file/volumes.md b/content/reference/compose-file/volumes.md index 333d8e666a73..64e826f58003 100644 --- a/content/reference/compose-file/volumes.md +++ b/content/reference/compose-file/volumes.md @@ -1,13 +1,14 @@ --- -title: Volumes top-level element -description: Explore all the attributes the volumes top-level element can have. +linkTitle: Volumes +title: Define and manage volumes in Docker Compose +description: Control how volumes are declared and shared between services using the top-level volumes element. keywords: compose, compose specification, volumes, compose file reference aliases: - /compose/compose-file/07-volumes/ weight: 40 --- -{{< include "compose/volumes.md" >}} +{{% include "compose/volumes.md" %}} To use a volume across multiple services, you must explicitly grant each service access by using the [volumes](services.md#volumes) attribute within the `services` top-level element. The `volumes` attribute has additional syntax that provides more granular control.
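For example, a minimal sketch of one named volume shared by two services (names are illustrative):

```yml
services:
  backend:
    image: example/backend
    volumes:
      - db-data:/var/lib/data
  backup:
    image: example/backup
    volumes:
      - db-data:/var/lib/data:ro

volumes:
  db-data:
```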
diff --git a/content/reference/glossary.md b/content/reference/glossary.md index 5f26de459e81..333da26ad87e 100644 --- a/content/reference/glossary.md +++ b/content/reference/glossary.md @@ -2,14 +2,22 @@ title: Glossary description: Glossary of terms used around Docker keywords: glossary, docker, terms, definitions -layout: glossary notoc: true +layout: glossary aliases: - /engine/reference/glossary/ - /glossary/ --- +> [!TIP] +> +> Looking for a definition that's not listed or need a more context-aware +> explanation? +> +> Try Ask AI. \ No newline at end of file diff --git a/content/reference/samples/_index.md b/content/reference/samples/_index.md index 49c769ba4bce..eb07f7f3a941 100644 --- a/content/reference/samples/_index.md +++ b/content/reference/samples/_index.md @@ -35,4 +35,4 @@ Learn how to containerize different types of services by walking through Officia ## Other samples -[AI/ML](../samples/ai-ml.md) \| [Cloudflared](../samples/cloudflared.md) \| [Elasticsearch / Logstash / Kibana](../samples/elasticsearch.md) \| [Minecraft](../samples/minecraft.md) \| [NGINX](../samples/nginx.md) \| [Pi-hole](../samples/pi-hole.md) \| [Plex](../samples/plex.md) \| [Traefik](../samples/traefik.md) \| [WireGuard](../samples/wireguard.md) +[Agentic AI](../samples/agentic-ai.md) \| [AI/ML](../samples/ai-ml.md) \| [Cloudflared](../samples/cloudflared.md) \| [Elasticsearch / Logstash / Kibana](../samples/elasticsearch.md) \| [Minecraft](../samples/minecraft.md) \| [NGINX](../samples/nginx.md) \| [Pi-hole](../samples/pi-hole.md) \| [Plex](../samples/plex.md) \| [Traefik](../samples/traefik.md) \| [WireGuard](../samples/wireguard.md) diff --git a/content/reference/samples/agentic-ai.md b/content/reference/samples/agentic-ai.md new file mode 100644 index 000000000000..58421ca486f8 --- /dev/null +++ b/content/reference/samples/agentic-ai.md @@ -0,0 +1,5 @@ +--- +title: Agentic AI samples +description: Docker samples for agentic AI. +service: agentic-ai +--- diff --git a/data/buildx/docker_buildx_bake.yaml b/data/buildx/docker_buildx_bake.yaml index 203d066a00a1..066d67119da0 100644 --- a/data/buildx/docker_buildx_bake.yaml +++ b/data/buildx/docker_buildx_bake.yaml @@ -20,6 +20,7 @@ options: value_type: stringArray default_value: '[]' description: Allow build to access specified resources + details_url: '#allow' deprecated: false hidden: false experimental: false @@ -218,6 +219,80 @@ inherited_options: kubernetes: false swarm: false examples: |- + ### Allow extra privileged entitlement (--allow) {#allow} + + ```text + --allow=ENTITLEMENT[=VALUE] + ``` + + Entitlements are designed to provide controlled access to privileged + operations. By default, Buildx and BuildKit operate with restricted + permissions to protect users and their systems from unintended side effects or + security risks. The `--allow` flag explicitly grants access to additional + entitlements, making it clear when a build or bake operation requires elevated + privileges. + + In addition to BuildKit's `network.host` and `security.insecure` entitlements + (see [`docker buildx build --allow`](/reference/cli/docker/buildx/build/#allow)), + Bake supports file system entitlements that grant granular control over file + system access. These are particularly useful when working with builds that need + access to files outside the default working directory. + + Bake supports the following filesystem entitlements: + + - `--allow fs=<path>` - Grant read and write access to files outside of the + working directory.
+ - `--allow fs.read=<path>` - Grant read access to files outside of the + working directory. + - `--allow fs.write=<path>` - Grant write access to files outside of the + working directory. + + The `fs` entitlements take a path value (relative or absolute) to a directory + on the filesystem. Alternatively, you can pass a wildcard (`*`) to allow Bake + to access the entire filesystem. + + ### Example: fs.read + + Given the following Bake configuration, Bake would need to access the parent + directory, relative to the Bake file. + + ```hcl + target "app" { + context = "../src" + } + ``` + + Assuming `docker buildx bake app` is executed in the same directory as the + `docker-bake.hcl` file, you would need to explicitly allow Bake to read from + the `../src` directory. In this case, the following invocations all work: + + ```console + $ docker buildx bake --allow fs.read=* app + $ docker buildx bake --allow fs.read=../src app + $ docker buildx bake --allow fs=* app + ``` + + ### Example: fs.write + + The following `docker-bake.hcl` file requires write access to the `/tmp` + directory. + + ```hcl + target "app" { + output = "/tmp" + } + ``` + + Assuming `docker buildx bake app` is executed outside of the `/tmp` directory, + you would need to allow the `fs.write` entitlement, either by specifying the + path or using a wildcard: + + ```console + $ docker buildx bake --allow fs=/tmp app + $ docker buildx bake --allow fs.write=/tmp app + $ docker buildx bake --allow fs.write=* app + ``` + ### Override the configured builder instance (--builder) {#builder} Same as [`buildx --builder`](/reference/cli/docker/buildx/#builder). @@ -440,18 +515,22 @@ examples: |- ```console $ docker buildx bake --set target.args.mybuildarg=value $ docker buildx bake --set target.platform=linux/arm64 - $ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo' - $ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets - $ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo' + $ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo' + $ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets + $ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo' + $ docker buildx bake --set target.platform+=linux/arm64 # appends 'linux/arm64' to the platform list ``` You can override the following fields: + * `annotations` + * `attest` * `args` * `cache-from` * `cache-to` * `context` * `dockerfile` + * `entitlements` * `labels` * `load` * `no-cache` @@ -464,6 +543,23 @@ examples: |- * `ssh` * `tags` * `target` + + You can append using the `+=` operator for the following fields: + + * `annotations`¹ + * `attest`¹ + * `cache-from` + * `cache-to` + * `entitlements`¹ + * `no-cache-filter` + * `output` + * `platform` + * `secrets` + * `ssh` + * `tags` + + > [!NOTE] + > ¹ These fields already append by default.
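For instance, a sketch of appending instead of overriding (target and values are illustrative):

```console
$ docker buildx bake --set app.tags+=myorg/app:dev
$ docker buildx bake --set app.platform+=linux/arm64
```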
deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_build.yaml b/data/buildx/docker_buildx_build.yaml index 39d13d3ac098..91aca49cfc8a 100644 --- a/data/buildx/docker_buildx_build.yaml +++ b/data/buildx/docker_buildx_build.yaml @@ -18,7 +18,7 @@ options: kubernetes: false swarm: false - option: allow - value_type: stringSlice + value_type: stringArray default_value: '[]' description: | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) @@ -187,16 +187,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: detach - value_type: bool - default_value: "false" - description: Detach buildx server (supported only on linux) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: file shorthand: f value_type: string @@ -415,15 +405,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: root - value_type: string - description: Specify root directory of server to connect - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: sbom value_type: string description: Shorthand for `--attest=type=sbom` @@ -456,16 +437,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: server-config - value_type: string - description: | - Specify buildx server config file (used only when launching new server) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: shm-size value_type: bytes default_value: "0" @@ -1433,7 +1404,7 @@ examples: |- ###### `type=file` usage In the following example, `type=file` is automatically detected because no - environment variable mathing `aws` (the ID) is set. + environment variable matching `aws` (the ID) is set. ```console $ docker buildx build --secret id=aws,src=$HOME/.aws/credentials . 
diff --git a/data/buildx/docker_buildx_debug.yaml b/data/buildx/docker_buildx_debug.yaml index 90e6d0e8756a..f9b45c6ad3a0 100644 --- a/data/buildx/docker_buildx_debug.yaml +++ b/data/buildx/docker_buildx_debug.yaml @@ -9,16 +9,6 @@ cname: clink: - docker_buildx_debug_build.yaml options: - - option: detach - value_type: bool - default_value: "true" - description: Detach buildx server for the monitor (supported only on linux) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: invoke value_type: string description: Launch a monitor with executing specified command @@ -49,25 +39,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: root - value_type: string - description: Specify root directory of server to connect for the monitor - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - - option: server-config - value_type: string - description: | - Specify buildx server config file for the monitor (used only when launching new server) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false inherited_options: - option: builder value_type: string diff --git a/data/buildx/docker_buildx_debug_build.yaml b/data/buildx/docker_buildx_debug_build.yaml index 38c3faa67ec6..547ae9e122a4 100644 --- a/data/buildx/docker_buildx_debug_build.yaml +++ b/data/buildx/docker_buildx_debug_build.yaml @@ -17,7 +17,7 @@ options: kubernetes: false swarm: false - option: allow - value_type: stringSlice + value_type: stringArray default_value: '[]' description: | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) @@ -176,16 +176,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: detach - value_type: bool - default_value: "false" - description: Detach buildx server (supported only on linux) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: file shorthand: f value_type: string @@ -394,15 +384,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: root - value_type: string - description: Specify root directory of server to connect - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: sbom value_type: string description: Shorthand for `--attest=type=sbom` @@ -433,16 +414,6 @@ options: experimentalcli: false kubernetes: false swarm: false - - option: server-config - value_type: string - description: | - Specify buildx server config file (used only when launching new server) - deprecated: false - hidden: false - experimental: false - experimentalcli: true - kubernetes: false - swarm: false - option: shm-size value_type: bytes default_value: "0" diff --git a/data/buildx/docker_buildx_dial-stdio.yaml b/data/buildx/docker_buildx_dial-stdio.yaml index ca47c5afd741..4ba6f40e9af2 100644 --- a/data/buildx/docker_buildx_dial-stdio.yaml +++ b/data/buildx/docker_buildx_dial-stdio.yaml @@ -1,8 +1,10 @@ command: docker buildx dial-stdio short: Proxy current stdio streams to builder instance long: |- - dial-stdio uses the stdin and stdout streams of the command to proxy to the configured builder instance. - It is not intended to be used by humans, but rather by other tools that want to interact with the builder instance via BuildKit API. 
+ dial-stdio uses the stdin and stdout streams of the command to proxy to the + configured builder instance. It is not intended to be used by humans, but + rather by other tools that want to interact with the builder instance via + the BuildKit API. usage: docker buildx dial-stdio pname: docker buildx plink: docker_buildx.yaml @@ -50,7 +52,7 @@ inherited_options: swarm: false examples: |- Example Go program that uses the dial-stdio command to wire up a BuildKit client. - This is for example use only and may not be suitable for production use. + This is for illustration only and might not be suitable for production use. ```go client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) { diff --git a/data/buildx/docker_buildx_history.yaml b/data/buildx/docker_buildx_history.yaml index e3e172193522..cc77851b07e4 100644 --- a/data/buildx/docker_buildx_history.yaml +++ b/data/buildx/docker_buildx_history.yaml @@ -5,17 +5,23 @@ usage: docker buildx history pname: docker buildx plink: docker_buildx.yaml cname: + - docker buildx history export + - docker buildx history import - docker buildx history inspect - docker buildx history logs - docker buildx history ls - docker buildx history open - docker buildx history rm + - docker buildx history trace clink: + - docker_buildx_history_export.yaml + - docker_buildx_history_import.yaml - docker_buildx_history_inspect.yaml - docker_buildx_history_logs.yaml - docker_buildx_history_ls.yaml - docker_buildx_history_open.yaml - docker_buildx_history_rm.yaml + - docker_buildx_history_trace.yaml inherited_options: - option: builder value_type: string diff --git a/data/buildx/docker_buildx_history_export.yaml b/data/buildx/docker_buildx_history_export.yaml new file mode 100644 index 000000000000..214ebd004d20 --- /dev/null +++ b/data/buildx/docker_buildx_history_export.yaml @@ -0,0 +1,138 @@ +command: docker buildx history export +short: Export build records into Docker Desktop bundle +long: |- + Export one or more build records to `.dockerbuild` archive files. These archives + contain metadata, logs, and build outputs, and can be imported into Docker + Desktop or shared across environments. +usage: docker buildx history export [OPTIONS] [REF...]
+pname: docker buildx history +plink: docker_buildx_history.yaml +options: + - option: all + value_type: bool + default_value: "false" + description: Export all build records for the builder + details_url: '#all' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: finalize + value_type: bool + default_value: "false" + description: Ensure build records are finalized before exporting + details_url: '#finalize' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: output + shorthand: o + value_type: string + description: Output file path + details_url: '#output' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: builder + value_type: string + description: Override the configured builder instance + details_url: '#builder' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + shorthand: D + value_type: bool + default_value: "false" + description: Enable debug logging + details_url: '#debug' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Export all build records to a file (--all) {#all} + + Use the `--all` flag and redirect the output: + + ```console + docker buildx history export --all > all-builds.dockerbuild + ``` + + Or use the `--output` flag: + + ```console + docker buildx history export --all -o all-builds.dockerbuild + ``` + + ### Use a specific builder instance (--builder) {#builder} + + ```console + docker buildx history export --builder builder0 ^1 -o builder0-build.dockerbuild + ``` + + ### Enable debug logging (--debug) {#debug} + + ```console + docker buildx history export --debug qu2gsuo8ejqrwdfii23xkkckt -o debug-build.dockerbuild + ``` + + ### Ensure build records are finalized before exporting (--finalize) {#finalize} + + Clients can report their own traces concurrently, and not all traces might be + saved yet by the time of the export. Use the `--finalize` flag to ensure all + traces are finalized before exporting.
+
+  ```console
+  docker buildx history export --finalize qu2gsuo8ejqrwdfii23xkkckt -o finalized-build.dockerbuild
+  ```
+
+  ### Export a single build to a custom file (--output) {#output}
+
+  ```console
+  docker buildx history export qu2gsuo8ejqrwdfii23xkkckt --output mybuild.dockerbuild
+  ```
+
+  You can find build IDs by running:
+
+  ```console
+  docker buildx history ls
+  ```
+
+  To export two builds into a single archive:
+
+  ```console
+  # Using build IDs
+  docker buildx history export qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3 -o multi.dockerbuild
+
+  # Or using relative offsets
+  docker buildx history export ^1 ^2 -o multi.dockerbuild
+  ```
+
+  To export builds to separate files, use shell redirection:
+
+  ```console
+  docker buildx history export ^1 > mybuild.dockerbuild
+  docker buildx history export ^2 > backend-build.dockerbuild
+  ```
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/data/buildx/docker_buildx_history_import.yaml b/data/buildx/docker_buildx_history_import.yaml
new file mode 100644
index 000000000000..bf6e2ac1fd07
--- /dev/null
+++ b/data/buildx/docker_buildx_history_import.yaml
@@ -0,0 +1,74 @@
+command: docker buildx history import
+short: Import build records into Docker Desktop
+long: |-
+  Import a build record from a `.dockerbuild` archive into Docker Desktop. This
+  lets you view, inspect, and analyze builds created in other environments or CI
+  pipelines.
+usage: docker buildx history import [OPTIONS] -
+pname: docker buildx history
+plink: docker_buildx_history.yaml
+options:
+  - option: file
+    shorthand: f
+    value_type: stringArray
+    default_value: '[]'
+    description: Import from a file path
+    details_url: '#file'
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+inherited_options:
+  - option: builder
+    value_type: string
+    description: Override the configured builder instance
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: debug
+    shorthand: D
+    value_type: bool
+    default_value: "false"
+    description: Enable debug logging
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+examples: |-
+  ### Import a `.dockerbuild` archive from standard input
+
+  ```console
+  docker buildx history import < mybuild.dockerbuild
+  ```
+
+  ### Import a build archive from a file (--file) {#file}
+
+  ```console
+  docker buildx history import --file ./artifacts/backend-build.dockerbuild
+  ```
+
+  ### Open a build manually
+
+  By default, the `import` command automatically opens the imported build in Docker
+  Desktop. You don't need to run `open` unless you're opening a specific build
+  or re-opening it later.
+
+  If you've imported multiple builds, you can open one manually:
+
+  ```console
+  docker buildx history open ci-build
+  ```
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/data/buildx/docker_buildx_history_inspect.yaml b/data/buildx/docker_buildx_history_inspect.yaml
index f226342956ce..23c5ee37b4eb 100644
--- a/data/buildx/docker_buildx_history_inspect.yaml
+++ b/data/buildx/docker_buildx_history_inspect.yaml
@@ -1,6 +1,9 @@
 command: docker buildx history inspect
-short: Inspect a build
-long: Inspect a build
+short: Inspect a build record
+long: |-
+  Inspect a build record to view metadata such as duration, status, build inputs,
+  platforms, outputs, and attached artifacts. You can also use flags to extract
+  provenance, SBOMs, or other detailed information.
 usage: docker buildx history inspect [OPTIONS] [REF]
 pname: docker buildx history
 plink: docker_buildx_history.yaml
@@ -8,6 +11,18 @@ cname:
   - docker buildx history inspect attachment
 clink:
   - docker_buildx_history_inspect_attachment.yaml
+options:
+  - option: format
+    value_type: string
+    default_value: pretty
+    description: Format the output
+    details_url: '#format'
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
 inherited_options:
   - option: builder
     value_type: string
@@ -29,6 +44,145 @@ inherited_options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+examples: |-
+  ### Inspect the most recent build
+
+  ```console
+  $ docker buildx history inspect
+  Name: buildx (binaries)
+  Context: .
+  Dockerfile: Dockerfile
+  VCS Repository: https://github.com/crazy-max/buildx.git
+  VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
+  Target: binaries
+  Platforms: linux/amd64
+  Keep Git Dir: true
+
+  Started: 2025-02-07 11:56:24
+  Duration: 1m 1s
+  Build Steps: 16/16 (25% cached)
+
+  Image Resolve Mode: local
+
+  Materials:
+  URI                                                        DIGEST
+  pkg:docker/docker/dockerfile@1                             sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
+  pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64   sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
+  pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64      sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
+
+  Attachments:
+  DIGEST                                                                    PLATFORM   TYPE
+  sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3              https://slsa.dev/provenance/v0.2
+  ```
+
+  ### Inspect a specific build
+
+  ```console
+  # Using a build ID
+  docker buildx history inspect qu2gsuo8ejqrwdfii23xkkckt
+
+  # Or using a relative offset
+  docker buildx history inspect ^1
+  ```
+
+  ### Format the output (--format) {#format}
+
+  The `--format` option controls the output format. Supported values are
+  `pretty` (the default), `json`, and a Go template.
+
+  #### Pretty output
+
+  ```console
+  $ docker buildx history inspect
+  Name: buildx (binaries)
+  Context: .
+ Dockerfile: Dockerfile + VCS Repository: https://github.com/crazy-max/buildx.git + VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361 + Target: binaries + Platforms: linux/amd64 + Keep Git Dir: true + + Started: 2025-02-07 11:56:24 + Duration: 1m 1s + Build Steps: 16/16 (25% cached) + + Image Resolve Mode: local + + Materials: + URI DIGEST + pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25 + pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037 + pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3 + + Attachments: + DIGEST PLATFORM TYPE + sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2 + + Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b + ``` + + #### JSON output + + ```console + $ docker buildx history inspect --format json + { + "Name": "buildx (binaries)", + "Ref": "5w7vkqfi0rf59hw4hnmn627r9", + "Context": ".", + "Dockerfile": "Dockerfile", + "VCSRepository": "https://github.com/crazy-max/buildx.git", + "VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361", + "Target": "binaries", + "Platform": [ + "linux/amd64" + ], + "KeepGitDir": true, + "StartedAt": "2025-02-07T12:01:05.75807272+01:00", + "CompletedAt": "2025-02-07T12:02:07.991778875+01:00", + "Duration": 62233706155, + "Status": "completed", + "NumCompletedSteps": 16, + "NumTotalSteps": 16, + "NumCachedSteps": 4, + "Config": { + "ImageResolveMode": "local" + }, + "Materials": [ + { + "URI": "pkg:docker/docker/dockerfile@1", + "Digests": [ + "sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25" + ] + }, + { + "URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64", + "Digests": [ + "sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037" + ] + }, + { + "URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64", + "Digests": [ + "sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3" + ] + } + ], + "Attachments": [ + { + "Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1", + "Type": "https://slsa.dev/provenance/v0.2" + } + ] + } + ``` + + #### Go template output + + ```console + $ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})" + buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361) + ``` deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_history_inspect_attachment.yaml b/data/buildx/docker_buildx_history_inspect_attachment.yaml index c43308120bba..ba883f474835 100644 --- a/data/buildx/docker_buildx_history_inspect_attachment.yaml +++ b/data/buildx/docker_buildx_history_inspect_attachment.yaml @@ -1,6 +1,9 @@ command: docker buildx history inspect attachment -short: Inspect a build attachment -long: Inspect a build attachment +short: Inspect a build record attachment +long: |- + Inspect a specific attachment from a build record, such as a provenance file or + SBOM. Attachments are optional artifacts stored with the build and may be + platform-specific. 
usage: docker buildx history inspect attachment [OPTIONS] REF [DIGEST]
 pname: docker buildx history inspect
 plink: docker_buildx_history_inspect.yaml
@@ -17,6 +20,7 @@ options:
   - option: type
     value_type: string
     description: Type of attachment
+    details_url: '#type'
     deprecated: false
     hidden: false
     experimental: false
@@ -44,6 +48,64 @@ inherited_options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+examples: |-
+  ### Inspect a provenance attachment from a build (--type) {#type}
+
+  Supported types include `provenance` and `sbom`.
+
+  ```console
+  $ docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt --type provenance
+  {
+    "_type": "https://slsa.dev/provenance/v0.2",
+    "buildDefinition": {
+      "buildType": "https://build.docker.com/BuildKit@v1",
+      "externalParameters": {
+        "target": "app",
+        "platforms": ["linux/amd64"]
+      }
+    },
+    "runDetails": {
+      "builder": "docker",
+      "by": "ci@docker.com"
+    }
+  }
+  ```
+
+  ### Inspect an SBOM for linux/amd64
+
+  ```console
+  $ docker buildx history inspect attachment ^0 \
+    --type sbom \
+    --platform linux/amd64
+  {
+    "bomFormat": "CycloneDX",
+    "specVersion": "1.5",
+    "version": 1,
+    "components": [
+      {
+        "type": "library",
+        "name": "alpine",
+        "version": "3.18.2"
+      }
+    ]
+  }
+  ```
+
+  ### Inspect an attachment by digest
+
+  You can inspect an attachment directly using its digest, which you can get from
+  the `inspect` output:
+
+  ```console
+  # Using a build ID
+  docker buildx history inspect attachment qu2gsuo8ejqrwdfii23xkkckt sha256:abcdef123456...
+
+  # Or using a relative offset
+  docker buildx history inspect attachment ^0 sha256:abcdef123456...
+  ```
+
+  Use `--type sbom` or `--type provenance` to filter attachments by type. To
+  inspect a specific attachment by digest, omit the `--type` flag.
 deprecated: false
 hidden: false
 experimental: false
diff --git a/data/buildx/docker_buildx_history_logs.yaml b/data/buildx/docker_buildx_history_logs.yaml
index c6afba47cda9..3a15e9efd24d 100644
--- a/data/buildx/docker_buildx_history_logs.yaml
+++ b/data/buildx/docker_buildx_history_logs.yaml
@@ -1,6 +1,15 @@
 command: docker buildx history logs
-short: Print the logs of a build
-long: Print the logs of a build
+short: Print the logs of a build record
+long: |-
+  Print the logs for a completed build. The output appears in the same format as
+  `--progress=plain`, showing the full logs for each step.
+
+  By default, this shows logs for the most recent build on the current builder.
+
+  You can also specify an earlier build using an offset. For example:
+
+  - `^1` shows logs for the build before the most recent
+  - `^2` shows logs for the build two steps back
 usage: docker buildx history logs [OPTIONS] [REF]
 pname: docker buildx history
 plink: docker_buildx_history.yaml
@@ -9,6 +18,7 @@ options:
     value_type: string
     default_value: plain
     description: Set type of progress output (plain, rawjson, tty)
+    details_url: '#progress'
    deprecated: false
    hidden: false
    experimental: false
@@ -36,6 +46,42 @@ inherited_options:
    experimentalcli: false
    kubernetes: false
    swarm: false
+examples: |-
+  ### Print logs for the most recent build
+
+  ```console
+  $ docker buildx history logs
+  #1 [internal] load build definition from Dockerfile
+  #1 transferring dockerfile: 31B done
+  #1 DONE 0.0s
+  #2 [internal] load .dockerignore
+  #2 transferring context: 2B done
+  #2 DONE 0.0s
+  ...
+  ```
+
+  By default, this shows logs for the most recent build on the current builder.
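+
+  Because the output is plain text, you can also pipe it to standard shell
+  tools. For example, to show only the step completion lines (the pattern is
+  illustrative):
+
+  ```console
+  $ docker buildx history logs | grep DONE
+  #1 DONE 0.0s
+  #2 DONE 0.0s
+  ```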
+ + ### Print logs for a specific build + + To print logs for a specific build, use a build ID or offset: + + ```console + # Using a build ID + docker buildx history logs qu2gsuo8ejqrwdfii23xkkckt + + # Or using a relative offset + docker buildx history logs ^1 + ``` + + ### Set type of progress output (--progress) {#progress} + + ```console + $ docker buildx history logs ^1 --progress rawjson + {"id":"buildx_step_1","status":"START","timestamp":"2024-05-01T12:34:56.789Z","detail":"[internal] load build definition from Dockerfile"} + {"id":"buildx_step_1","status":"COMPLETE","timestamp":"2024-05-01T12:34:57.001Z","duration":212000000} + ... + ``` deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_history_ls.yaml b/data/buildx/docker_buildx_history_ls.yaml index a1bdf1833ee1..d49eec62ce7b 100644 --- a/data/buildx/docker_buildx_history_ls.yaml +++ b/data/buildx/docker_buildx_history_ls.yaml @@ -1,14 +1,42 @@ command: docker buildx history ls short: List build records -long: List build records -usage: docker buildx history ls +long: |- + List completed builds recorded by the active builder. Each entry includes the + build ID, name, status, timestamp, and duration. + + By default, only records for the current builder are shown. You can filter + results using flags. +usage: docker buildx history ls [OPTIONS] pname: docker buildx history plink: docker_buildx_history.yaml options: + - option: filter + value_type: stringArray + default_value: '[]' + description: Provide filter values (e.g., `status=error`) + details_url: '#filter' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: format value_type: string default_value: table description: Format the output + details_url: '#format' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: local + value_type: bool + default_value: "false" + description: List records for current repository only + details_url: '#local' deprecated: false hidden: false experimental: false @@ -19,6 +47,7 @@ options: value_type: bool default_value: "false" description: Don't truncate output + details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -46,6 +75,80 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### List all build records for the current builder + + ```console + $ docker buildx history ls + BUILD ID NAME STATUS CREATED AT DURATION + qu2gsuo8ejqrwdfii23xkkckt .dev/2850 Completed 3 days ago 1.4s + qsiifiuf1ad9pa9qvppc0z1l3 .dev/2850 Completed 3 days ago 1.3s + g9808bwrjrlkbhdamxklx660b .dev/3120 Completed 5 days ago 2.1s + ``` + + ### List failed builds (--filter) {#filter} + + ```console + docker buildx history ls --filter status=error + ``` + + You can filter the list using the `--filter` flag. 
Supported filters include:
+
+  | Filter                                  | Supported comparisons                             | Example                    |
+  |:----------------------------------------|:--------------------------------------------------|:---------------------------|
+  | `ref`, `repository`, `status`           | Support `=` and `!=` comparisons                  | `--filter status!=success` |
+  | `startedAt`, `completedAt`, `duration`  | Support `<` and `>` comparisons with time values  | `--filter duration>30s`    |
+
+  You can combine multiple filters by repeating the `--filter` flag. Quote
+  filters that contain `>` or `<` so the shell doesn't interpret them as
+  redirections:
+
+  ```console
+  docker buildx history ls --filter status=error --filter 'duration>30s'
+  ```
+
+  ### List builds from the current project (--local) {#local}
+
+  ```console
+  docker buildx history ls --local
+  ```
+
+  ### Display full output without truncation (--no-trunc) {#no-trunc}
+
+  ```console
+  docker buildx history ls --no-trunc
+  ```
+
+  ### Format output (--format) {#format}
+
+  #### JSON output
+
+  ```console
+  $ docker buildx history ls --format json
+  [
+    {
+      "ID": "qu2gsuo8ejqrwdfii23xkkckt",
+      "Name": ".dev/2850",
+      "Status": "Completed",
+      "CreatedAt": "2025-04-15T12:33:00Z",
+      "Duration": "1.4s"
+    },
+    {
+      "ID": "qsiifiuf1ad9pa9qvppc0z1l3",
+      "Name": ".dev/2850",
+      "Status": "Completed",
+      "CreatedAt": "2025-04-15T12:29:00Z",
+      "Duration": "1.3s"
+    }
+  ]
+  ```
+
+  #### Go template output
+
+  ```console
+  $ docker buildx history ls --format '{{.Name}} - {{.Duration}}'
+  .dev/2850 - 1.4s
+  .dev/2850 - 1.3s
+  .dev/3120 - 2.1s
+  ```
 deprecated: false
 hidden: false
 experimental: false
diff --git a/data/buildx/docker_buildx_history_open.yaml b/data/buildx/docker_buildx_history_open.yaml
index e79b0ba56997..1b760fcf6456 100644
--- a/data/buildx/docker_buildx_history_open.yaml
+++ b/data/buildx/docker_buildx_history_open.yaml
@@ -1,6 +1,8 @@
 command: docker buildx history open
-short: Open a build in Docker Desktop
-long: Open a build in Docker Desktop
+short: Open a build record in Docker Desktop
+long: |-
+  Open a build record in Docker Desktop for visual inspection. This requires
+  Docker Desktop to be installed and running on the host machine.
 usage: docker buildx history open [OPTIONS] [REF]
 pname: docker buildx history
 plink: docker_buildx_history.yaml
@@ -25,6 +27,24 @@ inherited_options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+examples: |-
+  ### Open the most recent build in Docker Desktop
+
+  ```console
+  docker buildx history open
+  ```
+
+  By default, this opens the most recent build on the current builder.
+
+  ### Open a specific build
+
+  ```console
+  # Using a build ID
+  docker buildx history open qu2gsuo8ejqrwdfii23xkkckt
+
+  # Or using a relative offset
+  docker buildx history open ^1
+  ```
 deprecated: false
 hidden: false
 experimental: false
diff --git a/data/buildx/docker_buildx_history_rm.yaml b/data/buildx/docker_buildx_history_rm.yaml
index aa3ddd173cbb..2c35a7277587 100644
--- a/data/buildx/docker_buildx_history_rm.yaml
+++ b/data/buildx/docker_buildx_history_rm.yaml
@@ -1,6 +1,9 @@
 command: docker buildx history rm
 short: Remove build records
-long: Remove build records
+long: |-
+  Remove one or more build records from the current builder’s history. You can
+  remove specific builds by ID or offset, or delete all records at once using
+  the `--all` flag.
 usage: docker buildx history rm [OPTIONS] [REF...]
pname: docker buildx history plink: docker_buildx_history.yaml @@ -9,6 +12,7 @@ options: value_type: bool default_value: "false" description: Remove all build records + details_url: '#all' deprecated: false hidden: false experimental: false @@ -36,6 +40,32 @@ inherited_options: experimentalcli: false kubernetes: false swarm: false +examples: |- + ### Remove a specific build + + ```console + # Using a build ID + docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt + + # Or using a relative offset + docker buildx history rm ^1 + ``` + + ### Remove multiple builds + + ```console + # Using build IDs + docker buildx history rm qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3 + + # Or using relative offsets + docker buildx history rm ^1 ^2 + ``` + + ### Remove all build records from the current builder (--all) {#all} + + ```console + docker buildx history rm --all + ``` deprecated: false hidden: false experimental: false diff --git a/data/buildx/docker_buildx_history_trace.yaml b/data/buildx/docker_buildx_history_trace.yaml new file mode 100644 index 000000000000..866f7673843d --- /dev/null +++ b/data/buildx/docker_buildx_history_trace.yaml @@ -0,0 +1,104 @@ +command: docker buildx history trace +short: Show the OpenTelemetry trace of a build record +long: |- + View the OpenTelemetry trace for a completed build. This command loads the + trace into a Jaeger UI viewer and opens it in your browser. + + This helps analyze build performance, step timing, and internal execution flows. +usage: docker buildx history trace [OPTIONS] [REF] +pname: docker buildx history +plink: docker_buildx_history.yaml +options: + - option: addr + value_type: string + default_value: 127.0.0.1:0 + description: Address to bind the UI server + details_url: '#addr' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: compare + value_type: string + description: Compare with another build record + details_url: '#compare' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: builder + value_type: string + description: Override the configured builder instance + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: debug + shorthand: D + value_type: bool + default_value: "false" + description: Enable debug logging + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- + ### Open the OpenTelemetry trace for the most recent build + + This command starts a temporary Jaeger UI server and opens your default browser + to view the trace. 
+
+  ```console
+  docker buildx history trace
+  ```
+
+  ### Open the trace for a specific build
+
+  ```console
+  # Using a build ID
+  docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt
+
+  # Or using a relative offset
+  docker buildx history trace ^1
+  ```
+
+  ### Run the Jaeger UI on a specific port (--addr) {#addr}
+
+  ```console
+  # Using a build ID
+  docker buildx history trace qu2gsuo8ejqrwdfii23xkkckt --addr 127.0.0.1:16686
+
+  # Or using a relative offset
+  docker buildx history trace ^1 --addr 127.0.0.1:16686
+  ```
+
+  ### Compare two build traces (--compare) {#compare}
+
+  Compare two specific builds by reference:
+
+  ```console
+  # Using build IDs
+  docker buildx history trace --compare=qu2gsuo8ejqrwdfii23xkkckt qsiifiuf1ad9pa9qvppc0z1l3
+
+  # Or using a single relative offset
+  docker buildx history trace --compare=^1
+  ```
+
+  When you use a single reference with `--compare`, it compares that build
+  against the most recent one.
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
+
diff --git a/data/buildx/docker_buildx_imagetools_create.yaml b/data/buildx/docker_buildx_imagetools_create.yaml
index 4bcb085db3dd..3ade4138ddc9 100644
--- a/data/buildx/docker_buildx_imagetools_create.yaml
+++ b/data/buildx/docker_buildx_imagetools_create.yaml
@@ -10,7 +10,7 @@ long: |-
   a list or index, the output will be a manifest list, however you can disable
   this behavior with `--prefer-index=false` which attempts to preserve the
   source manifest format in the output.
-usage: docker buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]
+usage: docker buildx imagetools create [OPTIONS] [SOURCE...]
 pname: docker buildx imagetools
 plink: docker_buildx_imagetools.yaml
 options:
diff --git a/data/buildx/docker_buildx_rm.yaml b/data/buildx/docker_buildx_rm.yaml
index 94eed6c8da39..b5c632ebed1c 100644
--- a/data/buildx/docker_buildx_rm.yaml
+++ b/data/buildx/docker_buildx_rm.yaml
@@ -3,7 +3,7 @@ short: Remove one or more builder instances
 long: |-
   Removes the specified or current builder. It is a no-op attempting to remove
   the default builder.
-usage: docker buildx rm [OPTIONS] [NAME] [NAME...]
+usage: docker buildx rm [OPTIONS] [NAME...]
 pname: docker buildx
 plink: docker_buildx.yaml
 options:
diff --git a/data/debug-cli/docker_debug.yaml b/data/debug-cli/docker_debug.yaml
index a26d341a128b..461376e43691 100644
--- a/data/debug-cli/docker_debug.yaml
+++ b/data/debug-cli/docker_debug.yaml
@@ -1,11 +1,6 @@
 command: docker debug
 short: Get a shell into any container or image. An alternative to debugging with `docker exec`.
 long: |-
-  > **Note**
-  >
-  > Docker Debug requires a [Pro, Team, or Business subcription](/subscription/details/).
-  > You must [sign in](/desktop/get-started/) to use this command.
-
   Docker Debug is a CLI command that helps you follow best practices by keeping your images small and secure. With Docker Debug, you can debug your
   images while they contain the bare minimum to run your application. It does this by letting you create and work with slim images or containers that are often difficult to debug because all tools have
   been removed.
@@ -27,7 +22,7 @@ long: |-
   - `entrypoint`: Print, lint, or run the entrypoint, see [example](#understanding-the-default-startup-command-of-a-container-entry-points).
   - `builtins`: Show custom builtin tools.

-  > **Note**
+  > [!NOTE]
   >
   > For images and stopped containers, all changes are discarded when leaving the shell.
   > At no point, do changes affect the actual image or container.
diff --git a/data/desktop-cli/docker_desktop.yaml b/data/desktop-cli/docker_desktop.yaml
index a222a240064d..f65ec5666d19 100644
--- a/data/desktop-cli/docker_desktop.yaml
+++ b/data/desktop-cli/docker_desktop.yaml
@@ -10,14 +10,24 @@ cname:
   - docker desktop restart
   - docker desktop status
   - docker desktop engine
+  - docker desktop update
+  - docker desktop logs
+  - docker desktop disable
+  - docker desktop enable
+  - docker desktop version
 clink:
   - docker_desktop_start.yaml
   - docker_desktop_stop.yaml
   - docker_desktop_restart.yaml
   - docker_desktop_status.yaml
   - docker_desktop_engine.yaml
+  - docker_desktop_update.yaml
+  - docker_desktop_logs.yaml
+  - docker_desktop_disable.yaml
+  - docker_desktop_enable.yaml
+  - docker_desktop_version.yaml
 deprecated: false
-hidden: false
+hidden: true
 experimental: false
 experimentalcli: false
 kubernetes: false
diff --git a/data/desktop-cli/docker_desktop_disable.yaml b/data/desktop-cli/docker_desktop_disable.yaml
new file mode 100644
index 000000000000..5cfb99f7e210
--- /dev/null
+++ b/data/desktop-cli/docker_desktop_disable.yaml
@@ -0,0 +1,15 @@
+command: docker desktop disable
+short: Disable a feature
+long: Disable an individual feature
+pname: docker desktop
+plink: docker_desktop.yaml
+cname:
+  - docker desktop disable model-runner
+clink:
+  - docker_desktop_disable_model_runner.yaml
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
\ No newline at end of file
diff --git a/data/desktop-cli/docker_desktop_disable_model_runner.yaml b/data/desktop-cli/docker_desktop_disable_model_runner.yaml
new file mode 100644
index 000000000000..c7505e41cb56
--- /dev/null
+++ b/data/desktop-cli/docker_desktop_disable_model_runner.yaml
@@ -0,0 +1,12 @@
+command: docker desktop disable model-runner
+short: Disable Docker Model Runner
+long: Disable Docker Model Runner
+usage: docker desktop disable model-runner
+pname: docker desktop disable
+plink: docker_desktop_disable.yaml
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
\ No newline at end of file
diff --git a/data/desktop-cli/docker_desktop_enable.yaml b/data/desktop-cli/docker_desktop_enable.yaml
new file mode 100644
index 000000000000..d03f463a7dc9
--- /dev/null
+++ b/data/desktop-cli/docker_desktop_enable.yaml
@@ -0,0 +1,15 @@
+command: docker desktop enable
+short: Enable a feature
+long: Enable or manage an individual feature
+pname: docker desktop
+plink: docker_desktop.yaml
+cname:
+  - docker desktop enable model-runner
+clink:
+  - docker_desktop_enable_model_runner.yaml
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
\ No newline at end of file
diff --git a/data/desktop-cli/docker_desktop_enable_model_runner.yaml b/data/desktop-cli/docker_desktop_enable_model_runner.yaml
new file mode 100644
index 000000000000..4e163b0a4298
--- /dev/null
+++ b/data/desktop-cli/docker_desktop_enable_model_runner.yaml
@@ -0,0 +1,34 @@
+command: docker desktop enable model-runner
+short: Manage Docker Model Runner settings
+long: Enable and manage Docker Model Runner settings used by 'docker model'
+usage: docker desktop enable model-runner [OPTIONS]
+pname: docker desktop enable
+plink: docker_desktop_enable.yaml
+options:
+  - option: no-tcp
+    value_type: bool
+    default_value: "false"
+    description: Disable TCP connection. Cannot be used with --tcp.
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: tcp
+    value_type: port
+    default_value: "12434"
+    description: |
+      Enable or change TCP port for connection (1-65535). Cannot be used with --no-tcp.
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+deprecated: false
+hidden: false
+experimental: false
+experimentalcli: false
+kubernetes: false
+swarm: false
\ No newline at end of file
diff --git a/data/desktop-cli/docker_desktop_engine_ls.yaml b/data/desktop-cli/docker_desktop_engine_ls.yaml
index 52a1012d06dd..3c64229e6287 100644
--- a/data/desktop-cli/docker_desktop_engine_ls.yaml
+++ b/data/desktop-cli/docker_desktop_engine_ls.yaml
@@ -3,6 +3,17 @@ short: List available engines (Windows only)
 usage: docker desktop engine ls
 pname: docker desktop engine
 plink: docker_desktop_engine.yaml
+options:
+  - option: format
+    value_type: string
+    default_value: pretty
+    description: 'Format the output. Accepted values are: pretty, json'
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
 deprecated: false
 hidden: false
 experimental: false
diff --git a/data/desktop-cli/docker_desktop_logs.yaml b/data/desktop-cli/docker_desktop_logs.yaml
new file mode 100644
index 000000000000..3b447555e47d
--- /dev/null
+++ b/data/desktop-cli/docker_desktop_logs.yaml
@@ -0,0 +1,99 @@
+command: docker desktop logs
+short: Print log entries for Docker Desktop
+usage: docker desktop logs [OPTIONS]
+pname: docker desktop
+plink: docker_desktop.yaml
+options:
+  - option: boot
+    shorthand: b
+    value_type: int
+    default_value: 0
+    description: Show logs from a specified boot. Zero means the current boot, one the boot before that, and so on
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: color
+    shorthand: c
+    value_type: bool
+    default_value: false
+    description: Enable colored output. Priority levels are highlighted.
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: color-mode
+    shorthand: m
+    value_type: string
+    default_value: default
+    description: Color mode to use. Can be `default` or `priority`
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: directory
+    shorthand: D
+    value_type: string
+    description: Specifies a custom directory to search for log entries
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: priority
+    shorthand: p
+    value_type: int
+    default_value: -1
+    description: Filter output by log priority. `-1` shows all entries, `0` info and above, `1` warnings and above, and `2` errors only.
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: since
+    shorthand: S
+    value_type: string
+    description: Start showing entries on or newer than the specified date and time. Uses the systemd.time(7) format.
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: unit
+    shorthand: u
+    value_type: stringSlice
+    default_value: '[]'
+    description: Filter by one or more categories (e.g.
`--unit=com.docker.backend.ipc`, `com.docker.backend.apiproxy`) + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: until + shorthand: U + value_type: string + default_value: false + description: Start showing entries on or before the specified date and time. Uses the systemd.time(7) format. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_module.yaml b/data/desktop-cli/docker_desktop_module.yaml new file mode 100644 index 000000000000..4e044778be32 --- /dev/null +++ b/data/desktop-cli/docker_desktop_module.yaml @@ -0,0 +1,19 @@ +command: docker desktop module +short: Manage Docker Desktop modules +long: Manage Docker Desktop modules +pname: docker desktop +plink: docker_desktop.yaml +cname: + - docker desktop module ls + - docker desktop module reset + - docker desktop module update +clink: + - docker_desktop_module_ls.yaml + - docker_desktop_module_reset.yaml + - docker_desktop_module_update.yaml +deprecated: false +hidden: true +experimental: false +experimentalcli: true +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_module_ls.yaml b/data/desktop-cli/docker_desktop_module_ls.yaml new file mode 100644 index 000000000000..953cd1cce793 --- /dev/null +++ b/data/desktop-cli/docker_desktop_module_ls.yaml @@ -0,0 +1,35 @@ +command: docker desktop module ls +aliases: docker desktop module ls, docker desktop module list +short: List modules +long: List modules +usage: docker desktop module ls +pname: docker desktop module +plink: docker_desktop_module.yaml +options: + - option: format + value_type: string + default_value: pretty + description: 'Format the output. Values: [pretty | json].' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: "false" + description: Only display IDs. 
+ deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_module_reset.yaml b/data/desktop-cli/docker_desktop_module_reset.yaml new file mode 100644 index 000000000000..f3fdc72b97c1 --- /dev/null +++ b/data/desktop-cli/docker_desktop_module_reset.yaml @@ -0,0 +1,12 @@ +command: docker desktop module reset +short: Reset all updated modules +long: Reset all updated modules +usage: docker desktop module reset +pname: docker desktop module +plink: docker_desktop_module.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_module_update.yaml b/data/desktop-cli/docker_desktop_module_update.yaml new file mode 100644 index 000000000000..2013c51d5e32 --- /dev/null +++ b/data/desktop-cli/docker_desktop_module_update.yaml @@ -0,0 +1,12 @@ +command: docker desktop module update +short: Update all modules +long: Update all modules +usage: docker desktop module update +pname: docker desktop module +plink: docker_desktop_module.yaml +deprecated: false +hidden: false +experimental: false +experimentalcli: true +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_restart.yaml b/data/desktop-cli/docker_desktop_restart.yaml index 4809ede15448..62653e3d4bad 100644 --- a/data/desktop-cli/docker_desktop_restart.yaml +++ b/data/desktop-cli/docker_desktop_restart.yaml @@ -3,6 +3,29 @@ short: Restart Docker Desktop usage: docker desktop restart pname: docker desktop plink: docker_desktop.yaml +options: + - option: detach + shorthand: d + value_type: bool + default_value: "false" + description: Do not synchronously wait for the requested operation to complete. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout + value_type: seconds + default_value: "0" + description: | + Terminate the running command after the specified timeout with a non-zero exit code. A value of zero (the default) or -1 means no timeout. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false deprecated: false hidden: false experimental: false diff --git a/data/desktop-cli/docker_desktop_start.yaml b/data/desktop-cli/docker_desktop_start.yaml index 3422aa8873d2..95413bef75c1 100644 --- a/data/desktop-cli/docker_desktop_start.yaml +++ b/data/desktop-cli/docker_desktop_start.yaml @@ -5,9 +5,10 @@ pname: docker desktop plink: docker_desktop.yaml options: - option: detach + shorthand: d value_type: bool default_value: false - description: Start Docker Desktop in the background + description: Do not synchronously wait for the requested operation to complete. deprecated: false hidden: false experimental: false @@ -15,9 +16,9 @@ options: kubernetes: false swarm: false - option: timeout - value_type: init + value_type: seconds default_value: 0 - description: Specify in seconds how long to wait for Docker Desktop to start before timing out + description: Terminate the running command after the specified timeout with a non-zero exit code. A value of zero (the default) or -1 means no timeout. 
deprecated: false hidden: false experimental: false diff --git a/data/desktop-cli/docker_desktop_status.yaml b/data/desktop-cli/docker_desktop_status.yaml index e69a46a6e17d..85afef084e73 100644 --- a/data/desktop-cli/docker_desktop_status.yaml +++ b/data/desktop-cli/docker_desktop_status.yaml @@ -3,6 +3,17 @@ short: Display Docker Desktop's status usage: docker desktop status pname: docker desktop plink: docker_desktop.yaml +options: + - option: format + value_type: string + default_value: pretty + description: 'Format the output. Accepted values are: pretty, json' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false deprecated: false hidden: false experimental: false diff --git a/data/desktop-cli/docker_desktop_stop.yaml b/data/desktop-cli/docker_desktop_stop.yaml index e0b775a4e6f2..98d0ff7e12e6 100644 --- a/data/desktop-cli/docker_desktop_stop.yaml +++ b/data/desktop-cli/docker_desktop_stop.yaml @@ -5,9 +5,10 @@ pname: docker desktop plink: docker_desktop.yaml options: - option: detach + shorthand: d value_type: bool default_value: false - description: Stop Docker Desktop in the background + description: Do not synchronously wait for the requested operation to complete deprecated: false hidden: false experimental: false @@ -17,6 +18,7 @@ options: - option: force value_type: bool default_value: false + description: Force Docker Desktop to stop deprecated: false hidden: false experimental: false @@ -24,9 +26,9 @@ options: kubernetes: false swarm: false - option: timeout - value_type: init + value_type: seconds default_value: 0 - description: Specify in seconds how long to wait for Docker Desktop to stop before timing out + description: Terminate the running command after the specified timeout with a non-zero exit code. A value of zero (the default) or -1 means no timeout deprecated: false hidden: false experimental: false diff --git a/data/desktop-cli/docker_desktop_update.yaml b/data/desktop-cli/docker_desktop_update.yaml new file mode 100644 index 000000000000..aa50cdcef118 --- /dev/null +++ b/data/desktop-cli/docker_desktop_update.yaml @@ -0,0 +1,34 @@ +command: docker desktop update +short: Manage Docker Desktop updates +usage: docker desktop update [OPTIONS] +pname: docker desktop +plink: docker_desktop.yaml +options: + - option: check-only + shorthand: k + value_type: bool + default_value: false + description: Check for updates without applying them + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: quiet + shorthand: q + value_type: bool + default_value: false + description: Quietly check and apply updates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/desktop-cli/docker_desktop_version.yaml b/data/desktop-cli/docker_desktop_version.yaml new file mode 100644 index 000000000000..d52f6315ced9 --- /dev/null +++ b/data/desktop-cli/docker_desktop_version.yaml @@ -0,0 +1,33 @@ +command: docker desktop version +short: Show the Docker Desktop CLI plugin version information +long: Show the Docker Desktop CLI plugin version information +usage: docker desktop version [OPTIONS] +pname: docker desktop +plink: docker_desktop.yaml +options: + - option: format + shorthand: f + value_type: string + description: 'Format the output. 
Values: [pretty | json]. (Default: pretty)' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: short + value_type: bool + default_value: "false" + description: Shows only the version number + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false \ No newline at end of file diff --git a/data/engine-cli/docker.yaml b/data/engine-cli/docker.yaml index 4f19fed062fd..328d49de8362 100644 --- a/data/engine-cli/docker.yaml +++ b/data/engine-cli/docker.yaml @@ -339,6 +339,7 @@ long: |- list of root Certificate Authorities. cname: - docker attach + - docker bake - docker build - docker builder - docker checkpoint @@ -397,6 +398,7 @@ cname: - docker wait clink: - docker_attach.yaml + - docker_bake.yaml - docker_build.yaml - docker_builder.yaml - docker_checkpoint.yaml diff --git a/data/engine-cli/docker_bake.yaml b/data/engine-cli/docker_bake.yaml new file mode 100644 index 000000000000..14f8ab4c6845 --- /dev/null +++ b/data/engine-cli/docker_bake.yaml @@ -0,0 +1,26 @@ +command: docker bake +aliases: docker buildx bake +short: Build from a file +long: Build from a file +usage: docker bake [OPTIONS] [TARGET...] +pname: docker +plink: docker.yaml +inherited_options: + - option: help + value_type: bool + default_value: "false" + description: Print usage + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +min_api_version: "1.31" +experimental: false +experimentalcli: false +kubernetes: false +swarm: false + diff --git a/data/engine-cli/docker_container_create.yaml b/data/engine-cli/docker_container_create.yaml index ed89811b8398..eb25769b16cc 100644 --- a/data/engine-cli/docker_container_create.yaml +++ b/data/engine-cli/docker_container_create.yaml @@ -971,6 +971,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: use-api-socket + value_type: bool + default_value: "false" + description: Bind mount Docker API socket and required auth + deprecated: false + hidden: false + experimental: false + experimentalcli: true + kubernetes: false + swarm: false - option: user shorthand: u value_type: string diff --git a/data/engine-cli/docker_container_restart.yaml b/data/engine-cli/docker_container_restart.yaml index 464e1454ae32..ab621abde102 100644 --- a/data/engine-cli/docker_container_restart.yaml +++ b/data/engine-cli/docker_container_restart.yaml @@ -18,11 +18,22 @@ options: kubernetes: false swarm: false - option: time + value_type: int + default_value: "0" + description: | + Seconds to wait before killing the container (deprecated: use --timeout) + deprecated: true + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout shorthand: t value_type: int default_value: "0" description: Seconds to wait before killing the container - details_url: '#time' + details_url: '#timeout' deprecated: false hidden: false experimental: false @@ -61,14 +72,14 @@ examples: |- option when creating the container. If no signal is configured for the container, `SIGTERM` is used as default. 
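+
+  For example, to make the container's process receive `SIGHUP` instead of the
+  default signal when restarting (the container name is illustrative):
+
+  ```console
+  $ docker restart --signal SIGHUP mycontainer
+  ```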
- ### Stop container with timeout (-t, --timeout) {#time} + ### Stop container with timeout (-t, --timeout) {#timeout} - The `--time` flag sets the number of seconds to wait for the container - to stop after sending the pre-defined (see [`--signal`]{#signal)) system call signal. + The `--timeout` flag sets the number of seconds to wait for the container + to stop after sending the pre-defined (see [`--signal`](#signal)) system call signal. If the container does not exit after the timeout elapses, it's forcibly killed with a `SIGKILL` signal. - If you set `--time` to `-1`, no timeout is applied, and the daemon + If you set `--timeout` to `-1`, no timeout is applied, and the daemon waits indefinitely for the container to exit. The default timeout can be specified using the [`--stop-timeout`](/reference/cli/docker/container/run/#stop-timeout) diff --git a/data/engine-cli/docker_container_run.yaml b/data/engine-cli/docker_container_run.yaml index 0312bce2776c..2618bc43de2c 100644 --- a/data/engine-cli/docker_container_run.yaml +++ b/data/engine-cli/docker_container_run.yaml @@ -1024,6 +1024,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: use-api-socket + value_type: bool + default_value: "false" + description: Bind mount Docker API socket and required auth + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: user shorthand: u value_type: string @@ -1356,7 +1366,7 @@ examples: |- ### Set working directory (-w, --workdir) {#workdir} ```console - $ docker run -w /path/to/dir/ -i -t ubuntu pwd + $ docker run -w /path/to/dir/ ubuntu pwd ``` The `-w` option runs the command executed inside the directory specified, in this example, @@ -1718,15 +1728,16 @@ examples: |- for the `--network` flag. Comma-separated options that can be specified in the extended `--network` syntax are: - | Option | Top-level Equivalent | Description | - |-----------------|---------------------------------------|-------------------------------------------------| - | `name` | | The name of the network (mandatory) | - | `alias` | `--network-alias` | Add network-scoped alias for the container | - | `ip` | `--ip` | IPv4 address (e.g., 172.30.100.104) | - | `ip6` | `--ip6` | IPv6 address (e.g., 2001:db8::33) | - | `mac-address` | `--mac-address` | Container MAC address (e.g., 92:d0:c6:0a:29:33) | - | `link-local-ip` | `--link-local-ip` | Container IPv4/IPv6 link-local addresses | - | `driver-opt` | `docker network connect --driver-opt` | Network driver options | + | Option | Top-level Equivalent | Description | + |-----------------|---------------------------------------|-----------------------------------------------------------------------------------------| + | `name` | | The name of the network (mandatory) | + | `alias` | `--network-alias` | Add network-scoped alias for the container | + | `ip` | `--ip` | IPv4 address (e.g., 172.30.100.104) | + | `ip6` | `--ip6` | IPv6 address (e.g., 2001:db8::33) | + | `mac-address` | `--mac-address` | Container MAC address (e.g., 92:d0:c6:0a:29:33) | + | `link-local-ip` | `--link-local-ip` | Container IPv4/IPv6 link-local addresses | + | `driver-opt` | `docker network connect --driver-opt` | Network driver options | + | `gw-priority` | | Highest gw-priority provides the default gateway. Accepts positive and negative values. 
| ```console $ docker network create --subnet 192.0.2.0/24 my-net1 @@ -1904,15 +1915,14 @@ examples: |- #### CDI devices - > [!NOTE] - > The CDI feature is experimental, and potentially subject to change. - > CDI is currently only supported for Linux containers. - [Container Device Interface (CDI)](https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md) is a standardized mechanism for container runtimes to create containers which are able to interact with third party devices. + CDI is currently only supported for Linux containers and is enabled by default + since Docker Engine 28.3.0. + With CDI, device configurations are declaratively defined using a JSON or YAML file. In addition to enabling the container to interact with the device node, it also lets you specify additional configuration for the device, such as @@ -1933,7 +1943,7 @@ examples: |- available on the system running the daemon, in one of the configured CDI specification directories. - The CDI feature has been enabled in the daemon; see [Enable CDI - devices](/reference/cli/dockerd/#enable-cdi-devices). + devices](/reference/cli/dockerd/#configure-cdi-devices). ### Attach to STDIN/STDOUT/STDERR (-a, --attach) {#attach} @@ -2317,6 +2327,26 @@ examples: |- > $ docker run -it --ulimit as=1024 fedora /bin/bash > ``` + #### Supported options for `--ulimit`: + + | Option | Description | + |:-------------|:----------------------------------------------------------| + | `core` | Maximum size of core files created (`RLIMIT_CORE`) | + | `cpu` | CPU time limit in seconds (`RLIMIT_CPU`) | + | `data` | Maximum data segment size (`RLIMIT_DATA`) | + | `fsize` | Maximum file size (`RLIMIT_FSIZE`) | + | `locks` | Maximum number of file locks (`RLIMIT_LOCKS`) | + | `memlock` | Maximum locked-in-memory address space (`RLIMIT_MEMLOCK`) | + | `msgqueue` | Maximum bytes in POSIX message queues (`RLIMIT_MSGQUEUE`) | + | `nice` | Maximum nice priority adjustment (`RLIMIT_NICE`) | + | `nofile` | Maximum number of open file descriptors (`RLIMIT_NOFILE`) | + | `nproc` | Maximum number of processes available (`RLIMIT_NPROC`) | + | `rss` | Maximum resident set size (`RLIMIT_RSS`) | + | `rtprio` | Maximum real-time scheduling priority (`RLIMIT_RTPRIO`) | + | `rttime` | Maximum real-time execution time (`RLIMIT_RTTIME`) | + | `sigpending` | Maximum number of pending signals (`RLIMIT_SIGPENDING`) | + | `stack` | Maximum stack size (`RLIMIT_STACK`) | + Docker sends the values to the appropriate OS `syscall` and doesn't perform any byte conversion. Take this into account when setting the values. diff --git a/data/engine-cli/docker_container_stop.yaml b/data/engine-cli/docker_container_stop.yaml index 77c07cab6390..61d4a5b91036 100644 --- a/data/engine-cli/docker_container_stop.yaml +++ b/data/engine-cli/docker_container_stop.yaml @@ -22,11 +22,22 @@ options: kubernetes: false swarm: false - option: time + value_type: int + default_value: "0" + description: | + Seconds to wait before killing the container (deprecated: use --timeout) + deprecated: true + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout shorthand: t value_type: int default_value: "0" description: Seconds to wait before killing the container - details_url: '#time' + details_url: '#timeout' deprecated: false hidden: false experimental: false @@ -64,14 +75,14 @@ examples: |- option when creating the container. If no signal is configured for the container, `SIGTERM` is used as default. 
-  ### Stop container with timeout (-t, --timeout) {#time}
+  ### Stop container with timeout (-t, --timeout) {#timeout}

-  The `--time` flag sets the number of seconds to wait for the container
-  to stop after sending the pre-defined (see [`--signal`]{#signal)) system call signal.
+  The `--timeout` flag sets the number of seconds to wait for the container
+  to stop after sending the pre-defined (see [`--signal`](#signal)) system call signal.
   If the container does not exit after the timeout elapses, it's forcibly killed
   with a `SIGKILL` signal.

-  If you set `--time` to `-1`, no timeout is applied, and the daemon
+  If you set `--timeout` to `-1`, no timeout is applied, and the daemon
   waits indefinitely for the container to exit.

   The default timeout can be specified using the [`--stop-timeout`](/reference/cli/docker/container/run/#stop-timeout)
diff --git a/data/engine-cli/docker_create.yaml b/data/engine-cli/docker_create.yaml
index 883160b59ba7..4266c3d11c4f 100644
--- a/data/engine-cli/docker_create.yaml
+++ b/data/engine-cli/docker_create.yaml
@@ -954,6 +954,16 @@ options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+  - option: use-api-socket
+    value_type: bool
+    default_value: "false"
+    description: Bind mount Docker API socket and required auth
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: true
+    kubernetes: false
+    swarm: false
   - option: user
     shorthand: u
     value_type: string
diff --git a/data/engine-cli/docker_history.yaml b/data/engine-cli/docker_history.yaml
index bd9d9491a2db..643f822aa50a 100644
--- a/data/engine-cli/docker_history.yaml
+++ b/data/engine-cli/docker_history.yaml
@@ -42,6 +42,17 @@ options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+  - option: platform
+    value_type: string
+    description: |
+      Show history for the given platform. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`)
+    deprecated: false
+    hidden: false
+    min_api_version: "1.48"
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
   - option: quiet
     shorthand: q
     value_type: bool
diff --git a/data/engine-cli/docker_image_history.yaml b/data/engine-cli/docker_image_history.yaml
index 394734d80bd5..844acf52c15f 100644
--- a/data/engine-cli/docker_image_history.yaml
+++ b/data/engine-cli/docker_image_history.yaml
@@ -43,6 +43,18 @@ options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+  - option: platform
+    value_type: string
+    description: |
+      Show history for the given platform. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`)
+    details_url: '#platform'
+    deprecated: false
+    hidden: false
+    min_api_version: "1.48"
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
   - option: quiet
     shorthand: q
     value_type: bool
@@ -121,6 +133,57 @@ examples: |-
   f6e427c148a7: 4 weeks ago
   : 4 weeks ago
   ```
+
+  ### Show history for a specific platform (--platform) {#platform}
+
+  The `--platform` option allows you to specify which platform variant to show
+  history for if multiple platforms are present. By default, `docker history`
+  shows the history for the daemon's native platform or, if that platform is
+  not present, the first available platform.
+
+  If the local image store has multiple platform variants of an image, the
+  `--platform` option selects which variant to show the history for. An error
+  is produced if the given platform is not present in the local image cache.
+
+  The platform option takes the `os[/arch[/variant]]` format; for example,
+  `linux/amd64` or `linux/arm64/v8`. Architecture and variant are optional,
+  and if omitted, fall back to the daemon's defaults.
+
+  The following example pulls the RISC-V variant of the `alpine:latest` image
+  and shows its history.
+
+  ```console
+  $ docker image pull --quiet --platform=linux/riscv64 alpine
+  docker.io/library/alpine:latest
+
+  $ docker image history --platform=linux/riscv64 alpine
+  IMAGE          CREATED       CREATED BY                                      SIZE      COMMENT
+  beefdbd8a1da   3 weeks ago   /bin/sh -c #(nop)  CMD ["/bin/sh"]              0B
+  <missing>      3 weeks ago   /bin/sh -c #(nop) ADD file:ba2637314e600db5a…   8.46MB
+  ```
+
+  The following example attempts to show the history for a platform variant of
+  `alpine:latest` that doesn't exist in the local image store, resulting in
+  an error.
+
+  ```console
+  $ docker image ls --tree
+  IMAGE                ID             DISK USAGE   CONTENT SIZE   IN USE
+  alpine:latest        beefdbd8a1da       10.6MB         3.37MB
+  ├─ linux/riscv64     80cde017a105       10.6MB         3.37MB
+  ├─ linux/amd64       33735bd63cf8           0B             0B
+  ├─ linux/arm/v6      50f635c8b04d           0B             0B
+  ├─ linux/arm/v7      f2f82d424957           0B             0B
+  ├─ linux/arm64/v8    9cee2b382fe2           0B             0B
+  ├─ linux/386         b3e87f642f5c           0B             0B
+  ├─ linux/ppc64le     c7a6800e3dc5           0B             0B
+  └─ linux/s390x       2b5b26e09ca2           0B             0B
+
+  $ docker image history --platform=linux/s390x alpine
+  Error response from daemon: image with reference alpine:latest was found but does not match the specified platform: wanted linux/s390x
+  ```
 deprecated: false
 hidden: false
 experimental: false
diff --git a/data/engine-cli/docker_image_import.yaml b/data/engine-cli/docker_image_import.yaml
index d96ef3c3e1c9..4c95de8351dc 100644
--- a/data/engine-cli/docker_image_import.yaml
+++ b/data/engine-cli/docker_image_import.yaml
@@ -9,10 +9,6 @@ long: |-
   (root). If you specify an individual file, you must specify the full path within
   the host. To import from a remote location, specify a `URI` that begins with the
   `http://` or `https://` protocol.
-
-  The `--change` option applies `Dockerfile` instructions to the image that is
-  created. Supported `Dockerfile` instructions:
-  `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
 usage: docker image import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
 pname: docker image
 plink: docker_image.yaml
@@ -21,6 +17,7 @@ options:
     shorthand: c
     value_type: list
     description: Apply Dockerfile instruction to the created image
+    details_url: '#change'
     deprecated: false
     hidden: false
     experimental: false
@@ -31,6 +28,7 @@ options:
     shorthand: m
     value_type: string
     description: Set commit message for imported image
+    details_url: '#message'
     deprecated: false
     hidden: false
     experimental: false
@@ -40,6 +38,7 @@ options:
   - option: platform
     value_type: string
     description: Set platform if server is multi-platform capable
+    details_url: '#platform'
     deprecated: false
     hidden: false
     min_api_version: "1.32"
@@ -75,12 +74,6 @@ examples: |-
   $ cat exampleimage.tgz | docker import - exampleimagelocal:new
   ```

-  Import with a commit message.
-
-  ```console
-  $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new
-  ```
-
   Import to docker from a local archive.

   ```console
@@ -93,16 +86,109 @@ examples: |-
   $ sudo tar -c . | docker import - exampleimagedir
   ```

-  ### Import from a local directory with new configurations
-
-  ```console
-  $ sudo tar -c . | docker import --change "ENV DEBUG=true" - exampleimagedir
-  ```
-
   Note the `sudo` in this example – you must preserve
   the ownership of the files (especially root ownership) during the
   archiving with tar. If you are not root (or the sudo command) when you
   tar, then the ownerships might not get preserved.
+ + ### Import with new configurations (-c, --change) {#change} + + The `--change` option applies `Dockerfile` instructions to the image that is + created. Not all `Dockerfile` instructions are supported; the list of instructions + is limited to metadata (configuration) changes. The following `Dockerfile` + instructions are supported: + + - [`CMD`](/reference/dockerfile/#cmd) + - [`ENTRYPOINT`](/reference/dockerfile/#entrypoint) + - [`ENV`](/reference/dockerfile/#env) + - [`EXPOSE`](/reference/dockerfile/#expose) + - [`HEALTHCHECK`](/reference/dockerfile/#healthcheck) + - [`LABEL`](/reference/dockerfile/#label) + - [`ONBUILD`](/reference/dockerfile/#onbuild) + - [`STOPSIGNAL`](/reference/dockerfile/#stopsignal) + - [`USER`](/reference/dockerfile/#user) + - [`VOLUME`](/reference/dockerfile/#volume) + - [`WORKDIR`](/reference/dockerfile/#workdir) + + The following example imports an image from a tar archive containing a root filesystem, + and sets the `DEBUG` environment variable in the resulting image: + + ```console + $ docker import --change "ENV DEBUG=true" ./rootfs.tgz exampleimagedir + ``` + + The `--change` option can be set multiple times to apply multiple `Dockerfile` + instructions. The following example sets the `LABEL1` and `LABEL2` labels on + the imported image, in addition to the `DEBUG` environment variable from + the previous example: + + ```console + $ docker import \ + --change "ENV DEBUG=true" \ + --change "LABEL LABEL1=hello" \ + --change "LABEL LABEL2=world" \ + ./rootfs.tgz exampleimagedir + ``` + + ### Import with a commit message (-m, --message) {#message} + + The `--message` (or `-m`) option allows you to set a custom comment in + the image's metadata. The following example imports an image from a local + archive and sets a custom message. + + ```console + $ docker import --message "New image imported from tarball" ./rootfs.tgz exampleimagelocal:new + sha256:25e54c0df7dc49da9093d50541e0ed4508a6b78705057f1a9bebf1d564e2cb00 + ``` + + After importing, the message is set in the "Comment" field of the image's + configuration, which is shown when viewing the image's history: + + ```console + $ docker image history exampleimagelocal:new + + IMAGE CREATED CREATED BY SIZE COMMENT + 25e54c0df7dc 2 minutes ago 53.6MB New image imported from tarball + ``` + + ### When the daemon supports multiple operating systems + + If the daemon supports multiple operating systems, and the image being imported + does not match the default operating system, it might be necessary to add + `--platform`. For example, this is necessary when importing a Linux image into a Windows + daemon. + + ```console + $ docker import --platform=linux .\linuximage.tar + ``` + + ### Set the platform for the imported image (--platform) {#platform} + + The `--platform` option allows you to specify the platform for the imported + image. By default, the daemon's native platform is used, but + the `--platform` option allows you to override the default, for example, in + situations where the imported root filesystem is for a different architecture + or operating system. + + The platform option takes the `os[/arch[/variant]]` format; for example, + `linux/amd64` or `linux/arm64/v8`. Architecture and variant are optional, + and default to the daemon's native architecture if omitted.
+ + The following example imports an image from a root filesystem in `rootfs.tgz`, + and sets the image's platform to `linux/amd64`: + + ```console + $ docker image import --platform=linux/amd64 ./rootfs.tgz imported:latest + sha256:44a8b44157dad5edcff85f0c93a3e455f3b20a046d025af4ec50ed990d7ebc09 + ``` + + After importing the image, the image's platform is set in the image's + configuration: + + ```console + $ docker image inspect --format '{{.Os}}/{{.Architecture}}' imported:latest + linux/amd64 + ``` deprecated: false hidden: false experimental: false diff --git a/data/engine-cli/docker_image_inspect.yaml b/data/engine-cli/docker_image_inspect.yaml index 25f22ef9205d..c80082132510 100644 --- a/data/engine-cli/docker_image_inspect.yaml +++ b/data/engine-cli/docker_image_inspect.yaml @@ -19,6 +19,19 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: string + description: |- + Inspect a specific platform of the multi-platform image. + If the image or the server is not multi-platform capable, the command will error out if the platform does not match. + 'os[/arch[/variant]]': Explicit platform (e.g., linux/amd64) + deprecated: false + hidden: false + min_api_version: "1.49" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: help value_type: bool diff --git a/data/engine-cli/docker_image_load.yaml b/data/engine-cli/docker_image_load.yaml index f49e1698784d..c4b82bb93c52 100644 --- a/data/engine-cli/docker_image_load.yaml +++ b/data/engine-cli/docker_image_load.yaml @@ -19,6 +19,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: string + description: | + Load only the given platform variant. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + details_url: '#platform' + deprecated: false + hidden: false + min_api_version: "1.48" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: quiet shorthand: q value_type: bool @@ -76,6 +88,35 @@ examples: |- fedora heisenbug 58394af37342 7 weeks ago 385.5 MB fedora latest 58394af37342 7 weeks ago 385.5 MB ``` + + + ### Load a specific platform (--platform) {#platform} + + The `--platform` option allows you to specify which platform variant of the + image to load. By default, `docker load` loads all platform variants that + are present in the archive. An error is produced if the given + platform is not present in the archive. + + The platform option takes the `os[/arch[/variant]]` format; for example, + `linux/amd64` or `linux/arm64/v8`. Architecture and variant are optional, + and default to the daemon's native architecture if omitted. + + The following example loads the `linux/amd64` variant of an `alpine` image + from an archive that contains multiple platform variants.
+ + ```console + $ docker image load -i image.tar --platform=linux/amd64 + Loaded image: alpine:latest + ``` + + The following example attempts to load a `linux/ppc64le` image from an + archive, but the given platform is not present in the archive: + + ```console + $ docker image load -i image.tar --platform=linux/ppc64le + requested platform (linux/ppc64le) not found: image might be filtered out + ``` deprecated: false hidden: false experimental: false diff --git a/data/engine-cli/docker_image_rm.yaml b/data/engine-cli/docker_image_rm.yaml index b178a831f13a..5fde9d9cd213 100644 --- a/data/engine-cli/docker_image_rm.yaml +++ b/data/engine-cli/docker_image_rm.yaml @@ -35,6 +35,19 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: stringSlice + default_value: '[]' + description: | + Remove only the given platform variant. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + details_url: '#platform' + deprecated: false + hidden: false + min_api_version: "1.50" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: help value_type: bool @@ -122,6 +135,76 @@ examples: |- Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b ``` + + ### Remove specific platforms (--platform) {#platform} + + The `--platform` option allows you to specify which platform variants of the + image to remove. By default, `docker image remove` removes all platform variants + that are present. + + Removing a specific platform removes that variant from all images that reference + the same content, and requires the `--force` option to be used. Omitting the + `--force` option produces a warning, and the removal is canceled: + + ```console + $ docker image rm --platform=linux/amd64 alpine + Error response from daemon: Content will be removed from all images referencing this variant. Use --force to force delete. + ``` + + The platform option takes the `os[/arch[/variant]]` format; for example, + `linux/amd64` or `linux/arm64/v8`. Architecture and variant are optional, + and default to the daemon's native architecture if omitted. + + You can pass multiple platforms either by passing the `--platform` flag + multiple times, or by passing a comma-separated list of platforms to remove.
+ The following uses of this option are equivalent: + + ```console + $ docker image rm --platform linux/amd64 --platform linux/ppc64le myimage + $ docker image rm --platform linux/amd64,linux/ppc64le myimage + ``` + + The following example removes the `linux/amd64` and `linux/ppc64le` variants + of an `alpine` image that contains multiple platform variants in the image + cache: + + ```console + $ docker image ls --tree + + IMAGE ID DISK USAGE CONTENT SIZE EXTRA + alpine:latest a8560b36e8b8 37.8MB 11.2MB U + ├─ linux/amd64 1c4eef651f65 12.1MB 3.64MB U + ├─ linux/arm/v6 903bfe2ae994 0B 0B + ├─ linux/arm/v7 9c2d245b3c01 0B 0B + ├─ linux/arm64/v8 757d680068d7 12.8MB 3.99MB + ├─ linux/386 2436f2b3b7d2 0B 0B + ├─ linux/ppc64le 9ed53fd3b831 12.8MB 3.58MB + ├─ linux/riscv64 1de5eb4a9a67 0B 0B + └─ linux/s390x fe0dcdd1f783 0B 0B + + $ docker image rm --platform=linux/amd64,linux/ppc64le --force alpine + Deleted: sha256:1c4eef651f65e2f7daee7ee785882ac164b02b78fb74503052a26dc061c90474 + Deleted: sha256:9ed53fd3b83120f78b33685d930ce9bf5aa481f6e2d165c42cbbddbeaa196f6f + ``` + + After the command completes, the given variants of the `alpine` image are removed + from the image cache: + + ```console + $ docker image ls --tree + + IMAGE ID DISK USAGE CONTENT SIZE EXTRA + alpine:latest a8560b36e8b8 12.8MB 3.99MB + ├─ linux/amd64 1c4eef651f65 0B 0B + ├─ linux/arm/v6 903bfe2ae994 0B 0B + ├─ linux/arm/v7 9c2d245b3c01 0B 0B + ├─ linux/arm64/v8 757d680068d7 12.8MB 3.99MB + ├─ linux/386 2436f2b3b7d2 0B 0B + ├─ linux/ppc64le 9ed53fd3b831 0B 0B + ├─ linux/riscv64 1de5eb4a9a67 0B 0B + └─ linux/s390x fe0dcdd1f783 0B 0B + ``` deprecated: false hidden: false experimental: false diff --git a/data/engine-cli/docker_image_save.yaml b/data/engine-cli/docker_image_save.yaml index 571bebe43141..bbec0590cb8d 100644 --- a/data/engine-cli/docker_image_save.yaml +++ b/data/engine-cli/docker_image_save.yaml @@ -19,6 +19,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: string + description: | + Save only the given platform variant. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + details_url: '#platform' + deprecated: false + hidden: false + min_api_version: "1.48" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: help value_type: bool @@ -66,6 +78,55 @@ examples: |- ```console $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy ``` + + ### Save a specific platform (--platform) {#platform} + + The `--platform` option allows you to specify which platform variant of the + image to save. By default, `docker save` saves all platform variants that + are present in the daemon's image store. An error is produced + if the given platform is not present in the local image store. + + The platform option takes the `os[/arch[/variant]]` format; for example, + `linux/amd64` or `linux/arm64/v8`. Architecture and variant are optional, + and default to the daemon's native architecture if omitted. + + The following example pulls the RISC-V variant of the `alpine:latest` image + and saves it to a tar archive.
+ + ```console + $ docker pull --platform=linux/riscv64 alpine:latest + latest: Pulling from library/alpine + 8c4a05189a5f: Download complete + Digest: sha256:beefdbd8a1da6d2915566fde36db9db0b524eb737fc57cd1367effd16dc0d06d + Status: Downloaded newer image for alpine:latest + docker.io/library/alpine:latest + + $ docker image save --platform=linux/riscv64 -o alpine-riscv.tar alpine:latest + + $ ls -lh alpine-riscv.tar + -rw------- 1 thajeztah staff 3.9M Oct 7 11:06 alpine-riscv.tar + ``` + + The following example attempts to save a platform variant of `alpine:latest` + that doesn't exist in the local image store, resulting in an error. + + ```console + $ docker image ls --tree + IMAGE ID DISK USAGE CONTENT SIZE IN USE + alpine:latest beefdbd8a1da 10.6MB 3.37MB + ├─ linux/riscv64 80cde017a105 10.6MB 3.37MB + ├─ linux/amd64 33735bd63cf8 0B 0B + ├─ linux/arm/v6 50f635c8b04d 0B 0B + ├─ linux/arm/v7 f2f82d424957 0B 0B + ├─ linux/arm64/v8 9cee2b382fe2 0B 0B + ├─ linux/386 b3e87f642f5c 0B 0B + ├─ linux/ppc64le c7a6800e3dc5 0B 0B + └─ linux/s390x 2b5b26e09ca2 0B 0B + + $ docker image save --platform=linux/s390x -o alpine-s390x.tar alpine:latest + Error response from daemon: no suitable export target found for platform linux/s390x + ``` deprecated: false hidden: false experimental: false diff --git a/data/engine-cli/docker_image_tag.yaml b/data/engine-cli/docker_image_tag.yaml index 7dc8929588f3..40a0a014d5a8 100644 --- a/data/engine-cli/docker_image_tag.yaml +++ b/data/engine-cli/docker_image_tag.yaml @@ -2,38 +2,50 @@ command: docker image tag aliases: docker image tag, docker tag short: Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE long: |- - A full image name has the following format and components: - - `[HOST[:PORT_NUMBER]/]PATH` - - - `HOST`: The optional registry hostname specifies where the image is located. - The hostname must comply with standard DNS rules, but may not contain - underscores. If you don't specify a hostname, the command uses Docker's public - registry at `registry-1.docker.io` by default. Note that `docker.io` is the - canonical reference for Docker's public registry. - - `PORT_NUMBER`: If a hostname is present, it may optionally be followed by a - registry port number in the format `:8080`. - - `PATH`: The path consists of slash-separated components. Each - component may contain lowercase letters, digits and separators. A separator is - defined as a period, one or two underscores, or one or more hyphens. A component - may not start or end with a separator. While the - [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec) - supports more than two slash-separated components, most registries only support - two slash-separated components. For Docker's public registry, the path format is - as follows: - - `[NAMESPACE/]REPOSITORY`: The first, optional component is typically a - user's or an organization's namespace. The second, mandatory component is the - repository name. When the namespace is not present, Docker uses `library` - as the default namespace. - - After the image name, the optional `TAG` is a custom, human-readable manifest - identifier that's typically a specific version or variant of an image. The tag - must be valid ASCII and can contain lowercase and uppercase letters, digits, - underscores, periods, and hyphens. It can't start with a period or hyphen and - must be no longer than 128 characters. If you don't specify a tag, the command uses `latest` by default.
- - You can group your images together using names and tags, and then - [push](/reference/cli/docker/image/push/) them to a registry. + A Docker image reference consists of several components that describe where the + image is stored and its identity. These components are: + + ```text + [HOST[:PORT]/]NAMESPACE/REPOSITORY[:TAG] + ``` + + `HOST` + : Specifies the registry location where the image resides. If omitted, Docker + defaults to Docker Hub (`docker.io`). + + `PORT` + : An optional port number for the registry, if necessary (for example, `:5000`). + + `NAMESPACE/REPOSITORY` + : The namespace (optional) usually represents a user or organization. The + repository is required and identifies the specific image. If the namespace is + omitted, Docker defaults to `library`, the namespace reserved for Docker + Official Images. + + `TAG` + : An optional identifier used to specify a particular version or variant of the + image. If no tag is provided, Docker defaults to `latest`. + + ### Example image references + + `example.com:5000/team/my-app:2.0` + + - Host: `example.com` + - Port: `5000` + - Namespace: `team` + - Repository: `my-app` + - Tag: `2.0` + + `alpine` + + - Host: `docker.io` (default) + - Namespace: `library` (default) + - Repository: `alpine` + - Tag: `latest` (default) + + For more information on the structure and rules of image naming, refer to the + [Distribution reference](https://pkg.go.dev/github.com/distribution/reference#pkg-overview) + as the canonical definition of the format. usage: docker image tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] pname: docker image plink: docker_image.yaml diff --git a/data/engine-cli/docker_inspect.yaml b/data/engine-cli/docker_inspect.yaml index 154b95eb7a97..1fe0daebfca9 100644 --- a/data/engine-cli/docker_inspect.yaml +++ b/data/engine-cli/docker_inspect.yaml @@ -4,7 +4,59 @@ long: |- Docker inspect provides detailed information on constructs controlled by Docker. By default, `docker inspect` will render results in a JSON array. - +usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] +pname: docker +plink: docker.yaml +options: + - option: format + shorthand: f + value_type: string + description: |- + Format output using a custom template: + 'json': Print in JSON format + 'TEMPLATE': Print output using the given Go template. + Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates + details_url: '#format' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: size + shorthand: s + value_type: bool + default_value: "false" + description: Display total file sizes if the type is container + details_url: '#size' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: type + value_type: string + description: Only inspect objects of the given type + details_url: '#type' + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +inherited_options: + - option: help + value_type: bool + default_value: "false" + description: Print usage + deprecated: false + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +examples: |- ### Format the output (--format) {#format} If a format is specified, the given template will be executed for each result. 
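+ + For example, the following command uses a Go template to print only a + container's running state (a minimal illustration; `mycontainer` is a + placeholder for a container name on your system): + + ```console + $ docker inspect --format '{{.State.Running}}' mycontainer + true + ```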
@@ -14,7 +66,7 @@ long: |- ### Specify target type (--type) {#type} - `--type container|image|node|network|secret|service|volume|task|plugin` + `--type config|container|image|node|network|secret|service|volume|task|plugin` The `docker inspect` command matches any type of object by either ID or name. In some cases multiple type of objects (for example, a container and a volume) @@ -57,56 +109,7 @@ long: |- $ docker inspect --size database -f '{{ .SizeRw }}' 12288 ``` -usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] -pname: docker -plink: docker.yaml -options: - - option: format - shorthand: f - value_type: string - description: |- - Format output using a custom template: - 'json': Print in JSON format - 'TEMPLATE': Print output using the given Go template. - Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: size - shorthand: s - value_type: bool - default_value: "false" - description: Display total file sizes if the type is container - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false - - option: type - value_type: string - description: Return JSON for specified type - deprecated: false - hidden: false - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -inherited_options: - - option: help - value_type: bool - default_value: "false" - description: Print usage - deprecated: false - hidden: true - experimental: false - experimentalcli: false - kubernetes: false - swarm: false -examples: |- + ### Get an instance's IP address For the most part, you can pick out any field from the JSON in a fairly diff --git a/data/engine-cli/docker_load.yaml b/data/engine-cli/docker_load.yaml index 96472f88f78d..3f7d02e1954c 100644 --- a/data/engine-cli/docker_load.yaml +++ b/data/engine-cli/docker_load.yaml @@ -16,6 +16,17 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: string + description: | + Load only the given platform variant. 
Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + deprecated: false + hidden: false + min_api_version: "1.48" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: quiet shorthand: q value_type: bool diff --git a/data/engine-cli/docker_login.yaml b/data/engine-cli/docker_login.yaml index 3ac770ea3754..d8d0c93eb75d 100644 --- a/data/engine-cli/docker_login.yaml +++ b/data/engine-cli/docker_login.yaml @@ -158,7 +158,7 @@ options: - option: password shorthand: p value_type: string - description: Password + description: Password or Personal Access Token (PAT) deprecated: false hidden: false experimental: false @@ -168,7 +168,7 @@ options: - option: password-stdin value_type: bool default_value: "false" - description: Take the password from stdin + description: Take the Password or Personal Access Token (PAT) from stdin details_url: '#password-stdin' deprecated: false hidden: false diff --git a/data/engine-cli/docker_network_connect.yaml b/data/engine-cli/docker_network_connect.yaml index 6c7b467c6103..7fc8ba9eb997 100644 --- a/data/engine-cli/docker_network_connect.yaml +++ b/data/engine-cli/docker_network_connect.yaml @@ -29,6 +29,17 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: gw-priority + value_type: int + default_value: "0" + description: | + Highest gw-priority provides the default gateway. Accepts positive and negative values. + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: ip value_type: string description: IPv4 address (e.g., `172.30.100.104`) diff --git a/data/engine-cli/docker_network_create.yaml b/data/engine-cli/docker_network_create.yaml index aca1adb79d19..99706e3d55dd 100644 --- a/data/engine-cli/docker_network_create.yaml +++ b/data/engine-cli/docker_network_create.yaml @@ -170,10 +170,20 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: ipv4 + value_type: bool + default_value: "true" + description: Enable or disable IPv4 address assignment + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: ipv6 value_type: bool default_value: "false" - description: Enable or disable IPv6 networking + description: Enable or disable IPv6 address assignment deprecated: false hidden: false experimental: false @@ -299,30 +309,31 @@ examples: |- ### Bridge driver options - When creating a custom network, the default network driver (i.e. `bridge`) has - additional options that can be passed. The following are those options and the - equivalent Docker daemon flags used for docker0 bridge: + When creating a custom `bridge` network, the following additional options can + be passed. 
Some of these have equivalent flags that can be used on the dockerd + command line or in `daemon.json` to configure the default bridge, `docker0`: - | Option | Equivalent | Description | - |--------------------------------------------------|-------------|-------------------------------------------------------| - | `com.docker.network.bridge.name` | - | Bridge name to be used when creating the Linux bridge | - | `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | - | `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | - | `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | - | `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | - | `com.docker.network.container_iface_prefix` | - | Set a custom prefix for container interfaces | + | Network create option | Daemon option for `docker0` | Description | + |--------------------------------------------------|-----------------------------|-------------------------------------------------------| + | `com.docker.network.bridge.name` | - | Bridge name to be used when creating the Linux bridge | + | `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | + | `com.docker.network.bridge.enable_icc` | `--icc` | Enable or disable inter-container connectivity | + | `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | + | `com.docker.network.driver.mtu` | `--mtu` | Set the container network MTU | + | `com.docker.network.container_iface_prefix` | - | Set a custom prefix for container interfaces | The following arguments can be passed to `docker network create` for any network driver, again with their approximate equivalents to Docker daemon - flags used for the docker0 bridge: - - | Argument | Equivalent | Description | - |--------------|----------------|--------------------------------------------| - | `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | - | `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | - | `--internal` | - | Restrict external access to the network | - | `--ipv6` | `--ipv6` | Enable or disable IPv6 networking | - | `--subnet` | `--bip` | Subnet for network | + flags used for the `docker0` bridge: + + | Network create option | Daemon option for `docker0` | Description | + |-----------------------|-----------------------------------|--------------------------------------------| + | `--gateway` | - | IPv4 or IPv6 gateway for the master subnet | + | `--ip-range` | `--fixed-cidr`, `--fixed-cidr-v6` | Allocate IP addresses from a range | + | `--internal` | - | Restrict external access to the network | + | `--ipv4` | - | Enable or disable IPv4 address assignment | + | `--ipv6` | `--ipv6` | Enable or disable IPv6 address assignment | + | `--subnet` | `--bip`, `--bip6` | Subnet for network | For example, let's use `-o` or `--opt` options to specify an IP address binding when publishing ports: diff --git a/data/engine-cli/docker_network_ls.yaml b/data/engine-cli/docker_network_ls.yaml index d6ba1e68bf35..1b3b702d092e 100644 --- a/data/engine-cli/docker_network_ls.yaml +++ b/data/engine-cli/docker_network_ls.yaml @@ -39,6 +39,7 @@ options: value_type: bool default_value: "false" description: Do not truncate the output + details_url: '#no-trunc' deprecated: false hidden: false experimental: false @@ -79,6 +80,8 @@ examples: |- 78b03ee04fc4 multi-host overlay swarm ``` + ### List networks without
truncating the ID column (--no-trunc) {#no-trunc} + Use the `--no-trunc` option to display the full network id: ```console diff --git a/data/engine-cli/docker_restart.yaml b/data/engine-cli/docker_restart.yaml index f8753787489a..56495ad81ca1 100644 --- a/data/engine-cli/docker_restart.yaml +++ b/data/engine-cli/docker_restart.yaml @@ -17,6 +17,17 @@ options: kubernetes: false swarm: false - option: time + value_type: int + default_value: "0" + description: | + Seconds to wait before killing the container (deprecated: use --timeout) + deprecated: true + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout shorthand: t value_type: int default_value: "0" diff --git a/data/engine-cli/docker_rmi.yaml b/data/engine-cli/docker_rmi.yaml index 312aaf1f4fa0..624e99639b95 100644 --- a/data/engine-cli/docker_rmi.yaml +++ b/data/engine-cli/docker_rmi.yaml @@ -27,6 +27,18 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: stringSlice + default_value: '[]' + description: | + Remove only the given platform variant. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + deprecated: false + hidden: false + min_api_version: "1.50" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: help value_type: bool diff --git a/data/engine-cli/docker_run.yaml b/data/engine-cli/docker_run.yaml index 26cefcd309e0..9bced5ac57d2 100644 --- a/data/engine-cli/docker_run.yaml +++ b/data/engine-cli/docker_run.yaml @@ -984,6 +984,16 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: use-api-socket + value_type: bool + default_value: "false" + description: Bind mount Docker API socket and required auth + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false - option: user shorthand: u value_type: string diff --git a/data/engine-cli/docker_save.yaml b/data/engine-cli/docker_save.yaml index 1bda60a52f0e..f8876441f3e1 100644 --- a/data/engine-cli/docker_save.yaml +++ b/data/engine-cli/docker_save.yaml @@ -16,6 +16,17 @@ options: experimentalcli: false kubernetes: false swarm: false + - option: platform + value_type: string + description: | + Save only the given platform variant. Formatted as `os[/arch[/variant]]` (e.g., `linux/amd64`) + deprecated: false + hidden: false + min_api_version: "1.48" + experimental: false + experimentalcli: false + kubernetes: false + swarm: false inherited_options: - option: help value_type: bool diff --git a/data/engine-cli/docker_search.yaml b/data/engine-cli/docker_search.yaml index f2b379e7c595..7df5eba91a16 100644 --- a/data/engine-cli/docker_search.yaml +++ b/data/engine-cli/docker_search.yaml @@ -164,7 +164,6 @@ options: | `.Description` | Image description | | `.StarCount` | Number of stars for the image | | `.IsOfficial` | "OK" if image is official | - | `.IsAutomated` | "OK" if image build was automated (deprecated) | When you use the `--format` option, the `search` command will output the data exactly as the template declares.
If you use the diff --git a/data/engine-cli/docker_stop.yaml b/data/engine-cli/docker_stop.yaml index 298975752fb2..5a26774186ce 100644 --- a/data/engine-cli/docker_stop.yaml +++ b/data/engine-cli/docker_stop.yaml @@ -17,6 +17,17 @@ options: kubernetes: false swarm: false - option: time + value_type: int + default_value: "0" + description: | + Seconds to wait before killing the container (deprecated: use --timeout) + deprecated: true + hidden: true + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: timeout shorthand: t value_type: int default_value: "0" diff --git a/data/engine-cli/docker_swarm_init.yaml b/data/engine-cli/docker_swarm_init.yaml index 1e5e8f9e7a96..3344dd81ddc4 100644 --- a/data/engine-cli/docker_swarm_init.yaml +++ b/data/engine-cli/docker_swarm_init.yaml @@ -102,6 +102,7 @@ options: value_type: duration default_value: 5s description: Dispatcher heartbeat period (ns|us|ms|s|m|h) + details_url: '#dispatcher-heartbeat' deprecated: false hidden: false experimental: false @@ -217,7 +218,7 @@ examples: |- After disabling it, the encryption key is no longer required to start the manager, and it will start up on its own without user intervention. - ### Configure node healthcheck frequency (--dispatcher-heartbeat) + ### Configure node healthcheck frequency (--dispatcher-heartbeat) {#dispatcher-heartbeat} The `--dispatcher-heartbeat` flag sets the frequency at which nodes are told to report their health. diff --git a/data/engine-cli/docker_swarm_join-token.yaml b/data/engine-cli/docker_swarm_join-token.yaml index ccab60a619f0..69a69bb5cab2 100644 --- a/data/engine-cli/docker_swarm_join-token.yaml +++ b/data/engine-cli/docker_swarm_join-token.yaml @@ -21,6 +21,7 @@ options: value_type: bool default_value: "false" description: Only display token + details_url: '#quiet' deprecated: false hidden: false experimental: false @@ -31,6 +32,7 @@ options: value_type: bool default_value: "false" description: Rotate join token + details_url: '#rotate' deprecated: false hidden: false experimental: false @@ -97,7 +99,7 @@ examples: |- SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t ``` - ### `--rotate` + ### `--rotate` {#rotate} Because tokens allow new nodes to join the swarm, you should keep them secret. Be particularly careful with manager tokens since they allow new manager nodes @@ -116,7 +118,7 @@ examples: |- using the old token. Rotation does not affect existing nodes in the swarm because the join token is only used for authorizing new nodes joining the swarm. - ### `--quiet` + ### `--quiet` {#quiet} Only print the token. Do not print a complete command for joining. 
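+ + For example, the following prints just the worker token (an illustrative run, + reusing the example token shown above): + + ```console + $ docker swarm join-token --quiet worker + SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t + ```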
deprecated: false diff --git a/data/engine-cli/docker_swarm_join.yaml b/data/engine-cli/docker_swarm_join.yaml index ee2b43ac3739..cab96ffe3db1 100644 --- a/data/engine-cli/docker_swarm_join.yaml +++ b/data/engine-cli/docker_swarm_join.yaml @@ -11,6 +11,7 @@ options: - option: advertise-addr value_type: string description: 'Advertised address (format: `[:port]`)' + details_url: '#advertise-addr' deprecated: false hidden: false experimental: false @@ -21,6 +22,7 @@ options: value_type: string default_value: active description: Availability of the node (`active`, `pause`, `drain`) + details_url: '#availability' deprecated: false hidden: false experimental: false @@ -31,6 +33,7 @@ options: value_type: string description: | Address or interface to use for data path traffic (format: ``) + details_url: '#data-path-addr' deprecated: false hidden: false min_api_version: "1.31" @@ -42,6 +45,7 @@ options: value_type: node-addr default_value: 0.0.0.0:2377 description: 'Listen address (format: `[:port]`)' + details_url: '#listen-addr' deprecated: false hidden: false experimental: false @@ -51,6 +55,7 @@ options: - option: token value_type: string description: Token for entry into the swarm + details_url: '#token' deprecated: false hidden: false experimental: false @@ -102,7 +107,7 @@ examples: |- dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader ``` - ### `--listen-addr value` + ### `--listen-addr value` {#listen-addr} If the node is a manager, it will listen for inbound swarm manager traffic on this address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a @@ -113,7 +118,7 @@ examples: |- This flag is generally not necessary when joining an existing swarm. - ### `--advertise-addr value` + ### `--advertise-addr value` {#advertise-addr} This flag specifies the address that will be advertised to other members of the swarm for API access. If unspecified, Docker will check if the system has a @@ -133,7 +138,7 @@ examples: |- ensure the node advertises its IP address and not the IP address of the load balancer. - ### `--data-path-addr` + ### `--data-path-addr` {#data-path-addr} This flag specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. @@ -142,11 +147,11 @@ examples: |- If unspecified, Docker will use the same IP address or interface that is used for the advertise address. - ### `--token string` + ### `--token string` {#token} Secret value required for nodes to join the swarm - ### `--availability` + ### `--availability` {#availability} This flag specifies the availability of the node at the time the node joins a master. Possible availability values are `active`, `pause`, or `drain`. diff --git a/data/engine-cli/docker_swarm_unlock-key.yaml b/data/engine-cli/docker_swarm_unlock-key.yaml index d7d8fb202036..93ab3496fa47 100644 --- a/data/engine-cli/docker_swarm_unlock-key.yaml +++ b/data/engine-cli/docker_swarm_unlock-key.yaml @@ -22,6 +22,7 @@ options: value_type: bool default_value: "false" description: Only display token + details_url: '#quiet' deprecated: false hidden: false experimental: false @@ -32,6 +33,7 @@ options: value_type: bool default_value: "false" description: Rotate unlock key + details_url: '#rotate' deprecated: false hidden: false experimental: false @@ -87,12 +89,12 @@ examples: |- SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 ``` - ### `--rotate` + ### `--rotate` {#rotate} This flag rotates the unlock key, replacing it with a new randomly-generated key. 
The old unlock key will no longer be accepted. - ### `--quiet` + ### `--quiet` {#quiet} Only print the unlock key, without instructions. deprecated: false diff --git a/data/engine-cli/docker_system_events.yaml b/data/engine-cli/docker_system_events.yaml index eef3dccfe694..89eed4c996eb 100644 --- a/data/engine-cli/docker_system_events.yaml +++ b/data/engine-cli/docker_system_events.yaml @@ -7,7 +7,7 @@ long: |- scoped events are only seen on the node they take place on, and Swarm scoped events are seen on all managers. - Only the last 1000 log events are returned. You can use filters to further limit + Only the last 256 log events are returned. You can use filters to further limit the number of events returned. ### Object types @@ -137,7 +137,7 @@ long: |- seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a fraction of a second no more than nine digits long. - Only the last 1000 log events are returned. You can use filters to further limit + Only the last 256 log events are returned. You can use filters to further limit the number of events returned. #### Filtering (--filter) {#filter} diff --git a/data/glossary.yaml b/data/glossary.yaml index d65ff45740a8..7fbad242ddfd 100644 --- a/data/glossary.yaml +++ b/data/glossary.yaml @@ -1,283 +1,78 @@ -amd64: | - AMD64 is AMD's 64-bit extension of Intel's x86 architecture, and is also - referred to as x86_64 (or x86-64). -arm64: | - ARM64 is the 64-bit extension of the ARM CPU architecture. arm64 architecture - is used in Apple silicon machines. -btrfs: | - btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker - supports as a storage backend. It is a [copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write) filesystem. -build: | - Build is the process of building Docker images using a [Dockerfile](#dockerfile). - The build uses a Dockerfile and a "context". The context is the set of files in the directory in which the image is built. -cgroups: | - cgroups is a Linux kernel feature that limits, accounts for, and isolates - the resource usage (CPU, memory, disk I/O, network, etc.) of a collection - of processes. Docker relies on cgroups to control and isolate resource limits. - - *Also known as control groups* -cluster: | - A cluster is a group of machines that work together to run workloads and provide high availability. -Compose: | - [Compose](https://github.com/docker/compose) is a tool for defining and - running complex applications with Docker. With Compose, you define a - multi-container application in a single file, then spin your - application up in a single command which does everything that needs to - be done to get it running. - - *Also known as Docker Compose* -copy-on-write: | - Docker uses a - [copy-on-write](/engine/storage/drivers/#the-copy-on-write-cow-strategy) - technique and a [union file system](#union-file-system) for both images and - containers to optimize resources and speed performance. Multiple copies of an - entity share the same instance and each one makes only specific changes to its - unique layer. - - Multiple containers can share access to the same image, and make - container-specific changes on a writable layer which is deleted when - the container is removed. This speeds up container start times and performance. - - Images are essentially layers of filesystems typically predicated on a base - image under a writable layer, and built up with layers of differences from the - base image. This minimizes the footprint of the image and enables shared - development. 
- - For more about copy-on-write in the context of Docker, see [Understand images, - containers, and storage - drivers](/engine/storage/drivers/). -container: | - A container is a runtime instance of a [docker image](#image). - - A Docker container consists of - - - A Docker image - - An execution environment - - A standard set of instructions - - The concept is borrowed from shipping containers, which define a standard to ship goods globally. Docker defines a standard to ship software. -container image: | - Docker images are the basis of containers. An image is an ordered collection of root filesystem changes and the corresponding execution parameters for use within a container runtime. An image typically contains a union of layered filesystems stacked on top of each other. -Docker: | - The term Docker can refer to - - - The Docker project as a whole, which is a platform for developers and sysadmins to develop, ship, and run applications - - The docker daemon process running on the host which manages images and containers (also called Docker Engine) -Docker Business: | - Docker Business is a Docker subscription. Docker Business offers centralized management and advanced security features for enterprises that use Docker at scale. It empowers leaders to manage their Docker development environments and accelerate their secure software supply chain initiatives. -Docker Desktop: | - Docker Desktop is an easy-to-install, lightweight - Docker development environment. Docker Desktop is available for [Mac](#docker-desktop-for-mac), [Windows](#docker-desktop-for-windows), and [Linux](#docker-desktop-for-linux), providing developers a consistent experience across platforms. Docker Desktop includes Docker Engine, Docker CLI client, Docker Compose, Docker Content Trust, Kubernetes, and Credential Helper. - - Docker Desktop works with your choice of development tools and languages and gives you access to a vast library of certified images and templates in Docker Hub. This enables development teams to extend their environment to rapidly auto-build, continuously integrate, and collaborate using a secure repository. -Docker Desktop for Mac: | - Docker Desktop for Mac is an easy-to-install, lightweight - Docker development environment designed specifically for the Mac. A native - Mac application, Docker Desktop for Mac uses the macOS Hypervisor - framework, networking, and filesystem. It's the best solution if you want - to build, debug, test, package, and ship Dockerized applications on a - Mac. -Docker Desktop for Windows: | - Docker Desktop for Windows is an - easy-to-install, lightweight Docker development environment designed - specifically for Windows systems that support WSL 2 and Microsoft Hyper-V. - Docker Desktop for Windows uses WSL 2 or Hyper-V for - virtualization. Docker Desktop for Windows is the best solution if you want to build, debug, test, package, and ship Dockerized applications from Windows machines. -Docker Desktop for Linux: | - Docker Desktop for Linux is an easy-to-install, lightweight Docker development environment designed specifically for Linux machines. It's the best solution if you want to build, debug, test, package, and ship Dockerized applications on a Linux machine. -Docker Hub: | - [Docker Hub](https://hub.docker.com/) is a centralized resource for working with Docker and its components. 
It provides the following services: - - - A registry to host Docker images - - User authentication - - Automated image builds and workflow tools such as build triggers and web hooks - - Integration with GitHub and Bitbucket - - Security vulnerability scanning -Docker ID: | - Your free Docker ID grants you access to Docker Hub repositories and some beta programs. All you need is an email address. -Docker Official Images: | - The Docker Official Images are a curated set of Docker repositories hosted on [Docker Hub](#docker-hub). Docker, Inc. sponsors a dedicated team that is responsible for reviewing and publishing all content in the Docker Official Images. This team works in collaboration with upstream software maintainers, security experts, and the broader Docker community. -Docker Open Source Images: | - Docker Open Source Images are published and maintained by organizations that are a member of the Docker Open Source Program. -Docker Personal: | - Docker Personal is a [Docker subscription](#docker-subscription). With its focus on the open-source communities, individual developers, education, and small businesses, Docker Personal will continue to allow free use of Docker components - including the Docker CLI, Docker Compose, Docker Engine, Docker Desktop, Docker Hub, Kubernetes, Docker Build and Docker BuildKit, Docker Official Images, Docker Scan, and more. -Docker Pro: | - Docker Pro is a [Docker subscription](#docker-subscription). Docker Pro enables individual developers to get more control of their development environment and provides an integrated and reliable developer experience. It reduces the amount of time developers spend on mundane and repetitive tasks and empowers developers to spend more time creating value for their customers. -Docker subscription: | - Docker subscription tiers, sometimes referred to as plans, include [Personal](#docker-personal), [Pro](#docker-pro), [Team](#docker-team), and [Business](#docker-business). For more details, see [Docker subscription overview](/subscription/details/). -Docker Team: | - Docker Team is a [Docker subscription](#docker-subscription). Docker Team offers capabilities for collaboration, productivity, and security across organizations. It enables groups of developers to unlock the full power of collaboration and sharing combined with essential security features and team management capabilities. -Docker Trusted Content Program: | - The Docker Trusted Content Program verifies content through four programs, [Docker Official Images](#docker-official-images), [Docker Verified Publisher Images](#docker-verified-publisher-images), [Docker Open Source Images](#docker-open-source-images), and Custom Official Images. -Docker Verified Publisher Images: | - Docker Verified Publisher Images are confirmed by Docker to be from a trusted software publishers that are partners in the Verified Publisher program. Docker Verified Publisher Images are identified by the Verified Publisher badge included on the Docker Hub repositories. -Dockerfile: | - A Dockerfile is a text document that contains all the commands you would - normally execute manually in order to build a Docker image. Docker can - build images automatically by reading the instructions from a Dockerfile. -ENTRYPOINT: | - In a Dockerfile, an `ENTRYPOINT` is an optional definition for the first part - of the command to be run. If you want your Dockerfile to be runnable without - specifying additional arguments to the `docker run` command, you must specify - either `ENTRYPOINT`, `CMD`, or both. 
- - - If `ENTRYPOINT` is specified, it is set to a single command. Most official - Docker images have an `ENTRYPOINT` of `/bin/sh` or `/bin/bash`. Even if you - do not specify `ENTRYPOINT`, you may inherit it from the base image that you - specify using the `FROM` keyword in your Dockerfile. To override the - `ENTRYPOINT` at runtime, you can use `--entrypoint`. The following example - overrides the entrypoint to be `/bin/ls` and sets the `CMD` to `-l /tmp`. - - ```bash - $ docker run --entrypoint=/bin/ls ubuntu -l /tmp - ``` - - - `CMD` is appended to the `ENTRYPOINT`. The `CMD` can be any arbitrary string - that is valid in terms of the `ENTRYPOINT`, which allows you to pass - multiple commands or flags at once. To override the `CMD` at runtime, just - add it after the container name or ID. In the following example, the `CMD` - is overridden to be `/bin/ls -l /tmp`. - - ```bash - $ docker run ubuntu /bin/ls -l /tmp - ``` - - In practice, `ENTRYPOINT` is not often overridden. However, specifying the - `ENTRYPOINT` can make your images more flexible and easier to reuse. -filesystem: | - A file system is the method an operating system uses to name files - and assign them locations for efficient storage and retrieval. - - Examples : - - - Linux : overlay2, extfs, btrfs, zfs - - Windows : NTFS - - macOS : APFS -image: | - Docker images are the basis of [containers](#container). An Image is an - ordered collection of root filesystem changes and the corresponding - execution parameters for use within a container runtime. An image typically - contains a union of layered filesystems stacked on top of each other. An image - does not have state and it never changes. -invitee: | - People who have been invited to join an [organization](#organization), but have not yet accepted their invitation. -layer: | - In an image, a layer is modification to the image, represented by an instruction in the Dockerfile. Layers are applied in sequence to the base image to create the final image. - When an image is updated or rebuilt, only layers that change need to be updated, and unchanged layers are cached locally. This is part of why Docker images are so fast and lightweight. The sizes of each layer add up to equal the size of the final image. -libcontainer: | - libcontainer provides a native Go implementation for creating containers with - namespaces, cgroups, capabilities, and filesystem access controls. It allows - you to manage the lifecycle of the container performing additional operations - after the container is created. -libnetwork: | - libnetwork provides a native Go implementation for creating and managing container network namespaces and other network resources. It manages the networking lifecycle of the container performing additional operations after the container is created. -member: | - The people who have received and accepted invitations to join an [organization](#organization). Member can also refer to members of a [team](#team) within an organization. -namespace: | - A [Linux namespace](https://man7.org/linux/man-pages/man7/namespaces.7.html) - is a Linux kernel feature that isolates and virtualizes system resources. Processes which are restricted to a namespace can only interact with resources or processes that are part of the same namespace. Namespaces - are an important part of Docker's isolation model. Namespaces exist for each type of resource, including `net` (networking), `mnt` (storage), `pid` (processes), `uts` (hostname control), and `user` (UID mapping). 
For more information about namespaces, see [Docker run reference](/engine/containers/run/) and [Isolate containers with a user namespace](/engine/security/userns-remap/). -node: | - A [node](/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual - machine running an instance of the Docker Engine in [swarm mode](#swarm-mode). - - Manager nodes perform swarm management and orchestration duties. By default manager nodes are also worker nodes. - - Worker nodes execute tasks. -organization: | - An organization is a collection of teams and repositories that can be managed together. Docker users become members of an organization when they are assigned to at least one team in the organization. -organization name: | - The organization name, sometimes referred to as the organization namespace or the org ID, is the unique identifier of a Docker organization. -overlay network driver: | - Overlay network driver provides out of the box multi-host network connectivity - for Docker containers in a cluster. -overlay storage driver: | - OverlayFS is a [filesystem](#filesystem) service for Linux which implements a - [union mount](https://en.wikipedia.org/wiki/Union_mount) for other file systems. - It is supported by the Docker daemon as a storage driver. base image: | A base image is an image you designate in a `FROM` directive in a Dockerfile. It defines the starting point for your build. Dockerfile instructions create additional layers on top of the base image. A Dockerfile with the `FROM scratch` directive uses an empty base image. -persistent storage: | - Persistent storage or volume storage provides a way for a user to add a - persistent layer to the running container's file system. This persistent layer - could live on the container host or an external device. The lifecycle of this - persistent layer is not connected to the lifecycle of the container, allowing - a user to retain state. -registry: | - A Registry is a hosted service containing [repositories](#repository) of [images](#image) which responds to the Registry API. - The default registry can be accessed using a browser at [Docker Hub](#docker-hub) or using the `docker search` command. -repository: | - A repository is a set of Docker images. A repository can be shared by pushing it to a [registry](#registry) server. The different images in the repository can be labeled using [tags](#tag). - - Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) and its [tags](https://hub.docker.com/r/library/nginx/tags/). -SSH: | - SSH (secure shell) is a secure protocol for accessing remote machines and applications. - It provides authentication and encrypts data communication over insecure networks such as the Internet. SSH uses public/private key pairs to authenticate logins. -seats: | - The number of seats refers to the number of planned members within an [organization](#organization). -service: | - A [service](/engine/swarm/how-swarm-mode-works/services/) is the definition of how you want to run your application containers in a swarm. At the most basic level, a service defines which container image to run in the swarm and which commands to run in the container. For orchestration purposes, the service defines the "desired state", meaning how many containers to run as tasks and constraints for deploying the containers. +build: | + Build is the process of building Docker images using a Dockerfile. The build + uses a Dockerfile and a "context". 
The context is the set of files in the + directory in which the image is built. - Frequently a service is a microservice within the context of some larger - application. Examples of services might include an HTTP server, a database, or - any other type of executable program that you wish to run in a distributed - environment. -service account: | - A service account is a Docker ID used for automated management of container images or containerized applications. Service accounts are typically used in automated workflows, and do not share Docker IDs with the members in a Docker Team or Docker Business subscription plan. -service discovery: | - Swarm mode [container discovery](/engine/network/drivers/overlay/#container-discovery) is a DNS component internal to the swarm that automatically assigns each service on an overlay network in the swarm a VIP and DNS entry. Containers on the network share DNS mappings for the service through gossip so any container on the network can access the service through its service name. +container: | + A container is a runnable instance of an image. You can start, stop, move, or + delete a container using the Docker CLI or API. Containers are isolated from + one another and the host system but share the OS kernel. They provide a + lightweight and consistent way to run applications. + +context: | + A Docker context contains endpoint configuration for the Docker CLI to connect + to different Docker environments, such as remote Docker hosts or Docker + Desktop. Use `docker context use` to switch between contexts. + +Docker CLI: | + The Docker CLI is the command-line interface for interacting with the Docker + Engine. It provides commands like `docker run`, `docker build`, `docker ps`, + and others to manage Docker containers, images, and services. + +Docker Compose: | + Docker Compose is a tool for defining and running multi-container Docker + applications using a YAML file (`compose.yaml`). With a single command, you + can start all services defined in the configuration. - You don’t need to expose service-specific ports to make the service available to other services on the same overlay network. The swarm’s internal load balancer automatically distributes requests to the service VIP among the active tasks. -swarm: | - A [swarm](/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). -swarm mode: | - [Swarm mode](/engine/swarm/) refers to cluster management and orchestration - features embedded in Docker Engine. When you initialize a new swarm (cluster) or join nodes to a swarm, the Docker Engine runs in swarm mode. -tag: | - A tag is a label applied to a Docker image in a [repository](#repository). - Tags are how various images in a repository are distinguished from each other. -task: | - A [task](/engine/swarm/how-swarm-mode-works/services/#tasks-and-scheduling) is the atomic unit of scheduling within a swarm. A task carries a Docker container and the commands to run inside the container. Manager nodes assign tasks to worker nodes according to the number of replicas set in the service scale. -team: | - A team is a group of Docker users that belong to an [organization](#organization). An organization can have multiple teams. -Union file system: | - Union file systems implement a [union - mount](https://en.wikipedia.org/wiki/Union_mount) and operate by creating - layers. 
-
-  You don’t need to expose service-specific ports to make the service available to other services on the same overlay network. The swarm’s internal load balancer automatically distributes requests to the service VIP among the active tasks.
-swarm: |
-  A [swarm](/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode).
-swarm mode: |
-  [Swarm mode](/engine/swarm/) refers to cluster management and orchestration
-  features embedded in Docker Engine. When you initialize a new swarm (cluster) or join nodes to a swarm, the Docker Engine runs in swarm mode.
-tag: |
-  A tag is a label applied to a Docker image in a [repository](#repository).
-  Tags are how various images in a repository are distinguished from each other.
-task: |
-  A [task](/engine/swarm/how-swarm-mode-works/services/#tasks-and-scheduling) is the atomic unit of scheduling within a swarm. A task carries a Docker container and the commands to run inside the container. Manager nodes assign tasks to worker nodes according to the number of replicas set in the service scale.
-team: |
-  A team is a group of Docker users that belong to an [organization](#organization). An organization can have multiple teams.
-Union file system: |
-  Union file systems implement a [union
-  mount](https://en.wikipedia.org/wiki/Union_mount) and operate by creating
-  layers.
-  Docker uses union file systems in conjunction with
-  [copy-on-write](#copy-on-write) techniques to provide the building blocks for
-  containers, making them very lightweight and fast.
+Docker Desktop: |
+  Docker Desktop is an easy-to-install application for Windows, macOS, and Linux
+  that provides a local Docker development environment. It includes Docker
+  Engine, Docker CLI, Docker Compose, and a Kubernetes cluster.
-  For more on Docker and union file systems, see [Docker and OverlayFS in
-  practice](/engine/storage/drivers/overlayfs-driver/).
+Docker Engine: |
+  Docker Engine is the client-server technology that creates and runs Docker
+  containers. It includes the Docker daemon (`dockerd`), REST API, and the
+  Docker CLI client.
-  Example implementations of union file systems are
-  [UnionFS](https://en.wikipedia.org/wiki/UnionFS) and
-  [OverlayFS](https://en.wikipedia.org/wiki/OverlayFS).
-virtual machine: |
-  A virtual machine is a program that emulates a complete computer and imitates dedicated hardware.
-  It shares physical hardware resources with other users but isolates the operating system. The end user has the same experience on a Virtual Machine as they would have on dedicated hardware.
+Docker Hub: |
+  Docker Hub is Docker’s public registry service where users can store, share,
+  and manage container images. It hosts Docker Official Images, Verified
+  Publisher content, and community-contributed images.
-  Compared to containers, a virtual machine is heavier to run, provides more isolation, gets its own set of resources and does minimal sharing.
+image: |
+  An image is a read-only template used to create containers. It typically
+  includes a base operating system and application code packaged together using
+  a Dockerfile. Images are versioned using tags and can be pushed to or pulled
+  from a container registry like Docker Hub.
-  *Also known as VM*
-volume: |
-  A volume is a specially-designated directory within one or more containers
-  that bypasses the Union File System. Volumes are designed to persist data,
-  independent of the container's life cycle. Docker therefore never automatically deletes volumes when you remove a container, nor will it "garbage collect" volumes that are no longer referenced by a container.
-  *Also known as: data volume*
+layer: |
+  In an image, a layer is a filesystem change produced by an instruction in the
+  Dockerfile. Layers are applied in sequence to the base image to create the
+  final image. Unchanged layers are cached, making image builds faster and more
+  efficient.
-  There are three types of volumes: *host, anonymous, and named*:
+multi-architecture image: |
+  A multi-architecture image is a Docker image that supports multiple CPU
+  architectures, like `amd64` or `arm64`. Docker automatically pulls the correct
+  image for your platform when you use a multi-arch image.
-
-  - A **host volume** lives on the Docker host's filesystem and can be accessed from within the container.
+persistent storage: |
+  Persistent storage or volume storage provides a way for containers to retain
+  data beyond their lifecycle. This storage can exist on the host machine or an
+  external storage system and is not tied to the container's runtime.
-
-  - A **named volume** is a volume which Docker manages where on disk the volume is created, but it is given a name.
+registry: |
+  A registry is a storage and content delivery system for Docker images. The
+  default public registry is Docker Hub, but you can also set up private
+  registries using Docker Distribution.
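For example, pushing an image to a private registry is a matter of tagging it with the registry host first; the host and repository below are hypothetical:

```console
$ docker tag myapp:latest registry.example.com/team/myapp:latest
$ docker push registry.example.com/team/myapp:latest
```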
The + default public registry is Docker Hub, but you can also set up private + registries using Docker Distribution. - - An **anonymous volume** is similar to a named volume, however, it can be difficult to refer to the same volume over time when it is an anonymous volume. Docker handles where the files are stored. -x86_64: | - x86_64 (or x86-64) refers to a 64-bit instruction set invented by AMD as an - extension of Intel's x86 architecture. AMD calls its x86_64 architecture, - AMD64, and Intel calls its implementation, Intel 64. +volume: | + A volume is a special directory within a container that bypasses the Union + File System. Volumes are designed to persist data independently of the + container lifecycle. Docker supports host, anonymous, and named volumes. \ No newline at end of file diff --git a/data/init-cli/docker_init.yaml b/data/init-cli/docker_init.yaml index 49c968ca7874..caf04c790207 100644 --- a/data/init-cli/docker_init.yaml +++ b/data/init-cli/docker_init.yaml @@ -16,12 +16,11 @@ long: |- init` can overwrite it, using `docker-compose.yaml` as the name for the Compose file. - > **Warning** + > [!WARNING] > > You can't recover overwritten files. > To back up an existing file before selecting to overwrite it, rename the file or copy it to another directory. - { .warning } - + After running `docker init`, you can choose one of the following templates: * ASP.NET Core: Suitable for an ASP.NET Core application. diff --git a/data/offload-cli/docker_offload.yaml b/data/offload-cli/docker_offload.yaml new file mode 100644 index 000000000000..ca4877c0f0c8 --- /dev/null +++ b/data/offload-cli/docker_offload.yaml @@ -0,0 +1,25 @@ +command: docker offload +short: Control Docker Offload from the CLI +usage: docker offload +pname: docker +plink: docker.yaml +cname: + - docker offload accounts + - docker offload diagnose + - docker offload start + - docker offload status + - docker offload stop + - docker offload version +clink: + - docker_offload_accounts.yaml + - docker_offload_diagnose.yaml + - docker_offload_start.yaml + - docker_offload_status.yaml + - docker_offload_stop.yaml + - docker_offload_version.yaml +deprecated: false +hidden: true +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_accounts.yaml b/data/offload-cli/docker_offload_accounts.yaml new file mode 100644 index 000000000000..828e9359ef8e --- /dev/null +++ b/data/offload-cli/docker_offload_accounts.yaml @@ -0,0 +1,12 @@ +command: docker offload accounts +short: Prints available Docker Offload accounts +usage: docker offload accounts +pname: docker offload +plink: docker_offload.yaml +options: [] +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_diagnose.yaml b/data/offload-cli/docker_offload_diagnose.yaml new file mode 100644 index 000000000000..c553faf71b0f --- /dev/null +++ b/data/offload-cli/docker_offload_diagnose.yaml @@ -0,0 +1,12 @@ +command: docker offload diagnose +short: Print diagnostic information for Docker Offload +usage: docker offload diagnose +pname: docker offload +plink: docker_offload.yaml +options: [] +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_start.yaml b/data/offload-cli/docker_offload_start.yaml new file mode 100644 index 000000000000..1c0a24d0fb62 --- /dev/null +++ 
b/data/offload-cli/docker_offload_start.yaml @@ -0,0 +1,34 @@ +command: docker offload start +short: Start a Docker Offload session +usage: docker offload start +pname: docker offload +plink: docker_offload.yaml +options: + - option: account + shorthand: a + value_type: string + default_value: "" + description: The Docker account to use + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: gpu + shorthand: g + value_type: bool + default_value: "false" + description: Request an engine with a gpu + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_status.yaml b/data/offload-cli/docker_offload_status.yaml new file mode 100644 index 000000000000..2b07a346be49 --- /dev/null +++ b/data/offload-cli/docker_offload_status.yaml @@ -0,0 +1,23 @@ +command: docker offload status +short: Show the status of the Docker Offload connection +usage: docker offload status [OPTIONS] +pname: docker offload +plink: docker_offload.yaml +options: + - option: watch + shorthand: w + value_type: bool + default_value: "false" + description: Watch for status updates + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_stop.yaml b/data/offload-cli/docker_offload_stop.yaml new file mode 100644 index 000000000000..a78609c3ed95 --- /dev/null +++ b/data/offload-cli/docker_offload_stop.yaml @@ -0,0 +1,23 @@ +command: docker offload stop +short: Stop a Docker Offload session +usage: docker offload stop [OPTIONS] +pname: docker offload +plink: docker_offload.yaml +options: + - option: force + shorthand: f + value_type: bool + default_value: "false" + description: Don't prompt for confirmation + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/offload-cli/docker_offload_version.yaml b/data/offload-cli/docker_offload_version.yaml new file mode 100644 index 000000000000..f98288bdc3bb --- /dev/null +++ b/data/offload-cli/docker_offload_version.yaml @@ -0,0 +1,32 @@ +command: docker offload version +short: Prints the Docker Offload CLI version +usage: docker offload version [OPTIONS] +pname: docker offload +plink: docker_offload.yaml +options: + - option: json + value_type: bool + default_value: "false" + description: Prints the version as JSON + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false + - option: short + value_type: bool + default_value: "false" + description: Prints the short version + deprecated: false + hidden: false + experimental: false + experimentalcli: false + kubernetes: false + swarm: false +deprecated: false +hidden: false +experimental: false +experimentalcli: false +kubernetes: false +swarm: false diff --git a/data/redirects.yml b/data/redirects.yml index cae91bfb671d..727d6ecdb1fb 100644 --- a/data/redirects.yml +++ b/data/redirects.yml @@ -8,10 +8,8 @@ # provide a short, permanent link to refer to a topic in the documentation. 
# For example, the docker CLI can output https://docs.docker.com/go/some-topic # in its help output, which can be redirected to elsewhere in the documentation. -"/security/for-developers/access-tokens/": +"/security/access-tokens/": - /go/access-tokens/ -"/desktop/mac/apple-silicon/": - - /go/apple-silicon/ "/reference/api/engine/#deprecated-api-versions": - /engine/api/v1.18/ - /engine/api/v1.19/ @@ -31,9 +29,6 @@ - /reference/api/docker_remote_api_v1.21/ - /reference/api/docker_remote_api_v1.22/ - /reference/api/docker_remote_api_v1.23/ -# redirect for old location of dockerd reference -"/reference/cli/dockerd/": - - /engine/reference/commandline/dockerd/ # redirect updated location of the "running containers" page "/engine/containers/run/": - "/engine/reference/run/" @@ -83,13 +78,6 @@ - /go/formatting/ "/?utm_source=docker&utm_medium=inproductad&utm_campaign=20-11nurturecli_docs": - /go/guides/ -"/desktop/get-started/#credentials-management-for-linux-users": - - /go/linux-credentials/ -"/desktop/use-desktop/pause/": - - /go/mac-desktop-pause/ - - /go/win-desktop-pause/ -"/desktop/settings-and-maintenance/settings/#file-sharing": - - /go/mac-file-sharing/ "/engine/security/rootless/": # Instructions on running docker in rootless mode. This redirect is currently # used in the installation script at "get.docker.com" @@ -98,9 +86,7 @@ - /go/storage-driver/ "/docker-hub/vulnerability-scanning/": - /go/tip-scanning/ -"/desktop/features/wsl/": - # Link used by Docker Desktop to refer users on how to activate WSL 2 - - /go/wsl2/ + "/reference/api/hub/latest/": - /docker-hub/api/latest/ "/reference/api/hub/dvp/": @@ -236,9 +222,18 @@ "https://www.docker.com/products/build-cloud/?utm_campaign=2024-02-02-dbc_cli&utm_medium=in-product-ad&utm_source=desktop_v4": - /go/docker-build-cloud/ -# Run Cloud links +# Run Cloud links & Docker Cloud & Docker Offload "/": - /go/run-cloud-eap/ +"/offload/": + # Onboarding - VDI beta + - /go/docker-cloud/ +"/offload/about/": + # Onboarding - enable GPU + - /go/docker-cloud-gpu/ +"/ai/compose/models-and-compose/": + # Onboarding - enable GPU - compose for agents + - /go/compose-for-agents/ # CLI backlinks "/engine/cli/filter/": @@ -263,16 +258,9 @@ # Docker Debug "/reference/cli/docker/debug/": - /go/debug-cli/ -"/desktop/use-desktop/container/#integrated-terminal": +"/desktop/use-desktop/container/#debug": - /go/debug-gui/ -# Docker Desktop - volumes cloud backup -"/desktop/use-desktop/volumes/#export-a-volume": - - /go/volume-export/ - - /go/volume-export-aws/ - - /go/volume-export-azure/ - - /go/volume-export-gcs/ - # Docker Admin Console - Insights "/admin/organization/insights/": - /go/insights/ @@ -288,3 +276,77 @@ - /go/insights-images/ "/admin/organization/insights/#extensions": - /go/insights-extensions/ + +# Billing - cancellation +"/subscription/desktop-license/": + - /go/desktop-license/ +"/docker-hub/usage/pulls/": + - /go/hub-pull-limits/ + + +# Links in Desktop + + +# Desktop MCP Toolkit +"/ai/mcp-catalog-and-toolkit/toolkit/": + - /go/mcp-toolkit/ + +# Desktop DMR + +"/ai/model-runner/": + - /go/model-runner/ + +# Docker Desktop - volumes cloud backup +"/desktop/use-desktop/volumes/#export-a-volume": + - /go/volume-export/ + - /go/volume-export-aws/ + - /go/volume-export-azure/ + - /go/volume-export-gcs/ + +# Link used by Docker Desktop to refer users on how to activate WSL 2 +"/desktop/features/wsl/": + - /go/wsl2/ + +"/desktop/get-started/#credentials-management-for-linux-users": + - /go/linux-credentials/ +"/desktop/use-desktop/pause/": + - 
/go/mac-desktop-pause/ + - /go/win-desktop-pause/ +"/desktop/settings-and-maintenance/settings/#file-sharing": + - /go/mac-file-sharing/ + +"/desktop/use-desktop/container/": + - /go/container/ +"/desktop/use-desktop/images/": + - /go/images/ +"/desktop/use-desktop/volumes/": + - /go/volumes/ +"/extensions/": + - /go/extensions/ +"/engine/cli/completion/": + - /go/completion/ +"/desktop/features/vmm/": + - /go/vmm/ +"/security/for-admins/hardened-desktop/enhanced-container-isolation/": + - /go/eci/ +"/desktop/features/synchronized-file-sharing/": + - /go/synchronized-file-sharing/ +"/reference/cli/dockerd/": + - /go/daemon-config/ + - /engine/reference/commandline/dockerd/ +"/ai/gordon/": + - /go/gordon/ +"/desktop/features/wasm/": + - /go/wasm/ +"/compose/bridge/": + - /go/compose-bridge/ +"/desktop/settings-and-maintenance/settings/": + - /go/notifications/ +"/desktop/setup/install/mac-install/": + - /go/apple-silicon/ +"/desktop/setup/install/mac-permission-requirements/#installing-symlinks": + - /go/symlinks/ +"/desktop/setup/install/mac-permission-requirements/": + - /go/permissions/ +"/desktop/setup/install/mac-permission-requirements/#binding-privileged-ports": + - /go/port-mapping/ diff --git a/data/samples.yaml b/data/samples.yaml index a2e6b7db67cd..08bf3db727c2 100644 --- a/data/samples.yaml +++ b/data/samples.yaml @@ -37,7 +37,7 @@ samples: - mysql - title: NGINX / ASP.NET / MySQL url: https://github.com/docker/awesome-compose/tree/master/nginx-aspnet-mysql - description: A sample Nginx reverse proxy with an C# backend using ASP.NET. + description: A sample Nginx reverse proxy with a C# backend using ASP.NET. services: - nginx - .net @@ -350,4 +350,98 @@ samples: description: Get started with AI and ML using Docker, Neo4j, LangChain, and Ollama services: - python - - aiml \ No newline at end of file + - aiml +# Agentic AI ---------------------------- + - title: Agent-to-Agent + url: https://github.com/docker/compose-for-agents/tree/main/a2a + description: > + This app is a modular AI agent runtime built on Google's Agent + Development Kit (ADK) and the A2A (Agent-to-Agent) protocol. It wraps a + large language model (LLM)-based agent in an HTTP API and uses + structured execution flows with streaming responses, memory, and tools. + It is designed to make agents callable as network services and + composable with other agents. + services: + - python + - aiml + - agentic-ai + - title: ADK Multi-Agent Fact Checker + url: https://github.com/docker/compose-for-agents/tree/main/adk + description: > + This project demonstrates a collaborative multi-agent system built with + the Agent Development Kit (ADK), where a top-level Auditor agent coordinates + the workflow to verify facts. The Critic agent gathers evidence via live + internet searches using DuckDuckGo through the Model Context Protocol (MCP), + while the Reviser agent analyzes and refines the conclusion using internal + reasoning alone. The system showcases how agents with distinct roles and + tools can collaborate under orchestration. + services: + - python + - aiml + - agentic-ai + - title: DevDuck agents + url: https://github.com/docker/compose-for-agents/tree/main/adk-cerebras + description: > + A multi-agent system for Go programming assistance built with Google + Agent Development Kit (ADK). This project features a coordinating agent + (DevDuck) that manages two specialized sub-agents (Bob and Cerebras) + for different programming tasks. 
+ services: + - python + - aiml + - agentic-ai + - title: Agno + url: https://github.com/docker/compose-for-agents/tree/main/agno + description: > + This app is a multi-agent orchestration system powered by LLMs (like Qwen + and OpenAI) and connected to tools via a Model Control Protocol (MCP) + gateway. Its purpose is to retrieve, summarize, and document GitHub + issues—automatically creating Notion pages from the summaries. It also + supports file content summarization from GitHub. + services: + - python + - aiml + - agentic-ai + - title: CrewAI + url: https://github.com/docker/compose-for-agents/tree/main/crew-ai + description: > + This project showcases an autonomous, multi-agent virtual marketing team + built with CrewAI. It automates the creation of a high-quality, end-to-end + marketing strategy — from research to copywriting — using task delegation, + web search, and creative synthesis. + services: + - python + - aiml + - agentic-ai + - title: SQL Agent with LangGraph + url: https://github.com/docker/compose-for-agents/tree/main/langgraph + description: > + This project demonstrates a zero-config AI agent that uses LangGraph to + answer natural language questions by querying a SQL database — all + orchestrated with Docker Compose. + services: + - python + - aiml + - agentic-ai + - title: Spring AI Brave Search Example - Model Context Protocol (MCP) + url: https://github.com/docker/compose-for-agents/tree/main/spring-ai + description: > + This example demonstrates how to create a Spring AI Model Context Protocol + (MCP) client that communicates with the Brave Search MCP Server. The + application shows how to build an MCP client that enables natural language + interactions with Brave Search, allowing you to perform internet searches + through a conversational interface. This example uses Spring Boot + autoconfiguration to set up the MCP client through configuration files. + services: + - java + - aiml + - agentic-ai + - title: MCP UI with Vercel AI SDK + url: https://github.com/docker/compose-for-agents/tree/main/vercel + description: > + Start an MCP UI application that uses the Vercel AI SDK to provide a + chat interface for local models, provided by the Docker Model Runner, + with access to MCPs from the Docker MCP Catalog. 
+    services:
+      - aiml
+      - agentic-ai
\ No newline at end of file
diff --git a/data/summary.yaml b/data/summary.yaml
index 225227f8ac83..9444678e5f46 100644
--- a/data/summary.yaml
+++ b/data/summary.yaml
@@ -3,64 +3,189 @@
 Activity logs:
   for: Administrators
 Admin Console:
   subscription: [Business]
-  availability: Early access
   for: Administrators
 Admin orgs:
   subscription: [Team, Business]
   for: Administrators
 Air-gapped containers:
-  requires: Docker Desktop 4.29.0 and later
+  requires: Docker Desktop [4.29.0](/manuals/desktop/release-notes.md#4290) and later
 Allow list:
   for: Administrators
 Amazon S3 cache:
   availability: Experimental
 Ask Gordon:
   availability: Beta
+  requires: Docker Desktop [4.38.0](/manuals/desktop/release-notes.md#4380) and later
 Automated builds:
   subscription: [Pro, Team, Business]
 Azure blob:
   availability: Experimental
-Build bake:
-  availability: Experimental
+Build additional contexts:
+  requires: Docker Compose [2.17.0](/manuals/compose/releases/release-notes.md#2170) and later
 Build checks:
   availability: Beta
-  requires: Buildx v0.15.0 and later
+  requires: Docker Buildx [0.15.0](/manuals/build/release-notes.md#0150) and later
+Build dockerfile inline:
+  requires: Docker Compose [2.17.0](/manuals/compose/releases/release-notes.md#2170) and later
+Build entitlements:
+  requires: Docker Compose [2.27.1](/manuals/compose/releases/release-notes.md#2271) and later
+Build multiple exporters:
+  requires: Docker Buildx [0.13.0](/manuals/build/release-notes.md#0130) and later
+Buildkit host:
+  requires: Docker Buildx [0.9.0](/manuals/build/release-notes.md#090) and later
+Build privileged:
+  requires: Docker Compose [2.15.0](/manuals/compose/releases/release-notes.md#2150) and later
+Build ulimits:
+  requires: Docker Compose [2.23.1](/manuals/compose/releases/release-notes.md#2231) and later
+Buildx bake Git auth token:
+  requires: Docker Buildx [0.14.0](/manuals/build/release-notes.md#0140) and later
+Buildx bake Git SSH:
+  requires: Docker Buildx [0.14.0](/manuals/build/release-notes.md#0140) and later
+Buildx CPU profile:
+  requires: Docker Buildx [0.18.0](/manuals/build/release-notes.md#0180) and later
+Buildx Git check dirty:
+  requires: Docker Buildx [0.10.4](/manuals/build/release-notes.md#0104) and later
+Buildx Git info:
+  requires: Docker Buildx [0.10.0](/manuals/build/release-notes.md#0100) and later
+Buildx Git labels:
+  requires: Docker Buildx [0.10.0](/manuals/build/release-notes.md#0100) and later
+Buildx mem profile:
+  requires: Docker Buildx [0.18.0](/manuals/build/release-notes.md#0180) and later
+Buildx metadata provenance:
+  requires: Docker Buildx [0.14.0](/manuals/build/release-notes.md#0140) and later
+Buildx metadata warnings:
+  requires: Docker Buildx [0.16.0](/manuals/build/release-notes.md#0160) and later
+Buildx no default:
+  requires: Docker Buildx [0.10.4](/manuals/build/release-notes.md#0104) and later
 Cache backend API:
   availability: Experimental
 Company:
   subscription: [Business]
   for: Administrators
+Compliance reporting:
+  subscription: [Business]
+  for: Administrators
+  requires: Docker Desktop 4.40 and later
+  availability: Early Access
+Compose attach:
+  requires: Docker Compose [2.20.0](/manuals/compose/releases/release-notes.md#2200) and later
 Compose bridge:
-  availability: Experimental
+  requires: Docker Desktop 4.43.0 and later
+Config profiles:
+  requires: Docker Desktop 4.36 and later
+Compose dependent images:
+  requires: Docker Compose [2.22.0](/manuals/compose/releases/release-notes.md#2220) and later
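The `requires` entries above gate docs feature badges on minimum Docker Buildx and Docker Compose versions. As a quick sanity check against these minimums, you can print the versions you have installed:

```console
$ docker buildx version
$ docker compose version
```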
+Compose cgroup:
+  requires: Docker Compose [2.15.0](/manuals/compose/releases/release-notes.md#2150) and later
+Compose develop:
+  requires: Docker Compose [2.22.0](/manuals/compose/releases/release-notes.md#2220) and later
+Compose driver opts:
+  requires: Docker Compose [2.27.1](/manuals/compose/releases/release-notes.md#2271) and later
+Compose exec:
+  requires: Docker Compose [2.32.2](/manuals/compose/releases/release-notes.md#2322) and later
+Compose experimental:
+  requires: Docker Compose [2.26.0](/manuals/compose/releases/release-notes.md#2260) and later
+Compose enable ipv4:
+  requires: Docker Compose [2.33.1](/manuals/compose/releases/release-notes.md#2331) and later
+Compose file watch:
+  requires: Docker Compose [2.22.0](/manuals/compose/releases/release-notes.md#2220) and later
+Compose format:
+  requires: Docker Compose [2.30.0](/manuals/compose/releases/release-notes.md#2300) and later
+Compose gpus:
+  requires: Docker Compose [2.30.0](/manuals/compose/releases/release-notes.md#2300) and later
+Compose gw priority:
+  requires: Docker Compose [2.33.1](/manuals/compose/releases/release-notes.md#2331) and later
+Compose include:
+  requires: Docker Compose [2.20.3](/manuals/compose/releases/release-notes.md#2203) and later
+Compose interface-name:
+  requires: Docker Compose [2.36.0](/manuals/compose/releases/release-notes.md#2360) and later
+Compose label file:
+  requires: Docker Compose [2.32.2](/manuals/compose/releases/release-notes.md#2322) and later
+Compose lifecycle hooks:
+  requires: Docker Compose [2.30.0](/manuals/compose/releases/release-notes.md#2300) and later
+Compose mac address:
+  requires: Docker Compose [2.23.2](/manuals/compose/releases/release-notes.md#2232) and later
+Compose menu:
+  requires: Docker Compose [2.26.0](/manuals/compose/releases/release-notes.md#2260) and later
+Compose models:
+  requires: Docker Compose [2.38.0](/manuals/compose/releases/release-notes.md#2380) and later
+Compose model runner:
+  requires: Docker Compose [2.38.0](/manuals/compose/releases/release-notes.md#2380) and later, and Docker Desktop 4.43 and later
+Compose OCI artifact:
+  requires: Docker Compose [2.34.0](/manuals/compose/releases/release-notes.md#2340) and later
+Compose provider services:
+  requires: Docker Compose [2.36.0](/manuals/compose/releases/release-notes.md#2360) and later
+Compose progress:
+  requires: Docker Compose [2.36.0](/manuals/compose/releases/release-notes.md#2360) and later
+Compose replace file:
+  requires: Docker Compose [2.24.4](/manuals/compose/releases/release-notes.md#2244) and later
+Compose required:
+  requires: Docker Compose [2.24.0](/manuals/compose/releases/release-notes.md#2240) and later
+Compose post start:
+  requires: Docker Compose [2.30.0](/manuals/compose/releases/release-notes.md#2300) and later
+Compose pre stop:
+  requires: Docker Compose [2.30.0](/manuals/compose/releases/release-notes.md#2300) and later
+Compose uts:
+  requires: Docker Compose [2.15.1](/manuals/compose/releases/release-notes.md#2151) and later
+Composefile include:
+  requires: Docker Compose [2.20.0](/manuals/compose/releases/release-notes.md#2200) and later
 containerd:
   availability: Experimental
 Dev Environments:
   availability: Beta
 Docker Build Cloud:
   subscription: [Pro, Team, Business]
+Docker CLI OpenTelemetry:
+  requires: Docker Engine [26.1.0](/manuals/engine/release-notes/26.1.md#2610) and later
+Docker Offload:
+  availability: Beta
+  requires: Docker Desktop 4.43 and later
 docker compose alpha:
   availability: Experimental
+Docker Debug:
+  subscription: [Pro, Team, Business]
+  requires: Docker Desktop [4.33.0](/manuals/desktop/release-notes.md#4330) and later
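For the Docker Debug entry above, usage is a single command; the container name is hypothetical:

```console
$ docker debug my-container
```

This attaches a debug shell, with common tools included, to the running container.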
 Docker Desktop Archlinux:
   availability: Experimental
 Docker Desktop CLI:
-  availability: Beta
-  requires: Docker Desktop 4.37 and later
+  requires: Docker Desktop [4.37](/manuals/desktop/release-notes.md#4370) and later
+Docker Desktop CLI update:
+  requires: Docker Desktop 4.39 and later
+Docker Desktop CLI logs:
+  requires: Docker Desktop 4.39 and later
 Docker GitHub Copilot:
-  availability: Early access
+  availability: Early Access
+Docker Hardened Images:
+  subscription: [Docker Hardened Images]
+Docker Init:
+  requires: Docker Desktop [4.27](/manuals/desktop/release-notes.md#4270) and later
+Docker Model Runner:
+  availability: Beta
+  requires: Docker Engine or Docker Desktop (Windows) 4.41+ or Docker Desktop (macOS) 4.40+
+  for: See Requirements section below
+Docker Projects:
+  availability: Beta
 Docker Scout exceptions:
   availability: Experimental
-  requires: Docker Scout CLI 1.15.0 and later
+  requires: Docker Scout CLI [1.15.0](/manuals/scout/release-notes/cli.md#1150) and later
 Docker Scout GitHub:
   availability: Beta
 Docker Scout health scores:
   subscription: [Pro, Team, Business]
   availability: Beta
+Docker Scout Mount Permissions:
+  requires: Docker Desktop [4.34.0](/manuals/desktop/release-notes.md#4340) and later
+Domain management:
+  subscription: [Business]
+  for: Administrators
 Domain audit:
   subscription: [Business]
   for: Administrators
 Enforce sign-in:
-  subscription: [Business]
+  subscription: [Team, Business]
   for: Administrators
+Gated distribution:
+  availability: Early Access
 General admin:
   for: Administrators
 GitHub Actions cache:
@@ -68,9 +193,13 @@ GitHub Actions cache:
 Hardened Docker Desktop:
   subscription: [Business]
   for: Administrators
+Image management:
+  availability: Beta
+Immutable tags:
+  availability: Beta
 Import builds:
   availability: Beta
-  requires: Docker Desktop 4.31 and later
+  requires: Docker Desktop [4.31](/manuals/desktop/release-notes.md#4310) and later
 Insights:
   subscription: [Business]
   for: Administrators
@@ -78,16 +207,17 @@ Intune:
   for: Administrators
 Jamf Pro:
   for: Administrators
-MSI Installer:
-  availability: Docker Desktop 4.32 and later
+Load by default:
+  requires: Docker Buildx [0.14.0](/manuals/build/release-notes.md#0140) and later
+MSI installer:
+  subscription: [Business]
+  requires: Docker Desktop [4.32](/manuals/desktop/release-notes.md#4320) and later
+  for: Administrators
 OATs:
   subscription: [Team, Business]
-  availability: Beta
 PKG installer:
   subscription: [Business]
-  availability: Early access
-  requires: Docker Desktop 4.36 and later
+  requires: Docker Desktop [4.36](/manuals/desktop/release-notes.md#4360) and later
   for: Administrators
 Private marketplace:
   availability: Beta
@@ -97,18 +227,20 @@ Remediation with Docker Scout:
 Registry access management:
   subscription: [Business]
   for: Administrators
+SOCKS5 proxy support:
+  subscription: [Business]
 SSO:
   subscription: [Business]
   for: Administrators
 Synchronized file sharing:
   subscription: [Pro, Team, Business]
-  requires: Docker Desktop 4.27 and later
 USB/IP support:
-  requires: Docker Desktop 4.35.0 and later
+  requires: Docker Desktop [4.35.0](/manuals/desktop/release-notes.md#4350) and later
+  for: Docker Desktop for Mac, Linux, and Windows with the Hyper-V backend
 VMM:
-  availability: Beta
-  requires: Docker Desktop 4.35.0 and later
+  requires: Docker Desktop [4.35.0](/manuals/desktop/release-notes.md#4350) and later
+  for: Docker Desktop on Mac with Apple Silicon
 Wasm workloads:
   availability:
Beta Wasmtime: - availability: Experimental \ No newline at end of file + availability: Experimental diff --git a/docker-bake.hcl b/docker-bake.hcl index dbbf1d568ccf..14c5750359ce 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -14,6 +14,10 @@ variable "DRY_RUN" { default = null } +variable "GITHUB_ACTIONS" { + default = null +} + group "default" { targets = ["release"] } @@ -36,7 +40,7 @@ target "release" { } group "validate" { - targets = ["lint", "test", "unused-media", "test-go-redirects", "dockerfile-lint", "path-warnings"] + targets = ["lint", "vale", "test", "unused-media", "test-go-redirects", "dockerfile-lint", "path-warnings", "validate-vendor"] } target "test" { @@ -51,6 +55,15 @@ target "lint" { provenance = false } +target "vale" { + target = "vale" + args = { + GITHUB_ACTIONS = GITHUB_ACTIONS + } + output = ["./tmp"] + provenance = false +} + target "unused-media" { target = "unused-media" output = ["type=cacheonly"] @@ -157,6 +170,11 @@ target "vendor" { provenance = false } +target "validate-vendor" { + target = "validate-vendor" + output = ["type=cacheonly"] +} + variable "UPSTREAM_MODULE_NAME" { default = null } diff --git a/go.mod b/go.mod index 500a6b9e9f24..0f7edf39e78b 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,23 @@ module github.com/docker/docs -go 1.23.1 +go 1.24.0 require ( - github.com/docker/buildx v0.20.0 // indirect - github.com/docker/cli v27.5.0+incompatible // indirect - github.com/docker/compose/v2 v2.32.4 // indirect - github.com/docker/scout-cli v1.15.0 // indirect - github.com/moby/buildkit v0.19.0 // indirect - github.com/moby/moby v27.5.0+incompatible // indirect + github.com/docker/buildx v0.25.0 // indirect + github.com/docker/cli v28.3.3-0.20250711132746-c69d8bde4adc+incompatible // indirect + github.com/docker/compose/v2 v2.38.2 // indirect + github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2 // indirect + github.com/docker/scout-cli v1.18.1 // indirect + github.com/moby/buildkit v0.23.2 // indirect + github.com/moby/moby v28.3.2+incompatible // indirect ) replace ( - github.com/docker/buildx => github.com/docker/buildx v0.20.0 - github.com/docker/cli => github.com/docker/cli v27.5.0+incompatible - github.com/docker/compose/v2 => github.com/docker/compose/v2 v2.32.4 - github.com/docker/scout-cli => github.com/docker/scout-cli v1.15.0 - github.com/moby/buildkit => github.com/moby/buildkit v0.19.0 - github.com/moby/moby => github.com/moby/moby v27.5.0+incompatible + github.com/docker/buildx => github.com/docker/buildx v0.25.0 + github.com/docker/cli => github.com/docker/cli v28.3.3-0.20250711132746-c69d8bde4adc+incompatible + github.com/docker/compose/v2 => github.com/docker/compose/v2 v2.38.2 + github.com/docker/model-cli => github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2 + github.com/docker/scout-cli => github.com/docker/scout-cli v1.18.1 + github.com/moby/buildkit => github.com/moby/buildkit v0.23.2 + github.com/moby/moby => github.com/moby/moby v28.3.2+incompatible ) diff --git a/go.sum b/go.sum index ba47eca2db11..b6bb6c8374d5 100644 --- a/go.sum +++ b/go.sum @@ -1,404 +1,325 @@ -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= 
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bugsnag/bugsnag-go v1.4.1/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= -github.com/compose-spec/compose-spec v0.0.0-20230623140450-d63a678732a3 h1:YbpEs/CNAGa/2lsASxr2XAcOMQBeMVd6uQLLZxUD4Dc= -github.com/compose-spec/compose-spec v0.0.0-20230623140450-d63a678732a3/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230626141542-c7f842ce463a h1:9gsWcK+jNkxqKaFyt5hkMA+lNXnyLC0gSJPUQF9h/CI= -github.com/compose-spec/compose-spec v0.0.0-20230626141542-c7f842ce463a/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230629051316-305289eead60 h1:0wyrkmqb40WXecFlMRxxKA+iA9XqoCDgyXCud/Y5OwY= -github.com/compose-spec/compose-spec v0.0.0-20230629051316-305289eead60/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230713225457-40c2421be01f h1:9gcoN1ndvlPiauBc4M6fxTIbTd/bWpqRYawdXULJq3E= -github.com/compose-spec/compose-spec v0.0.0-20230713225457-40c2421be01f/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230719125707-55b450aee507 h1:k7z47/imAaUlZuMVpNTRX0h6E2dYQ5YQ/DUJJAP4680= -github.com/compose-spec/compose-spec v0.0.0-20230719125707-55b450aee507/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230724132559-6907af1d7a8b h1:VbdFseBE88G/bW9LIynx77QMTZpW8kX9/ydDMyGtgZI= -github.com/compose-spec/compose-spec v0.0.0-20230724132559-6907af1d7a8b/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230810081227-631f10f1aabc h1:E93Ppj6xhV/QYCNTg0oWAnsJxuy7v5X3vLH+6iSp7IY= -github.com/compose-spec/compose-spec v0.0.0-20230810081227-631f10f1aabc/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230824062516-848a47ad5d0e h1:qmkQEx3/l6a4ofsiU+b1gDRkANoy55uyc3EXp1n77/Y= -github.com/compose-spec/compose-spec v0.0.0-20230824062516-848a47ad5d0e/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20230927132538-f223c5150d5d h1:EHvVBP6ZYAz5KXU5/iA3K6Z7G7haxPm44g/08tueZSw= -github.com/compose-spec/compose-spec v0.0.0-20230927132538-f223c5150d5d/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/compose-spec/compose-spec v0.0.0-20231121152139-478928e7c9f8 
h1:WQU6c3MGdIxVcDRC+Qstk9bJuYvHLvbyfkN8fStL1Qk= -github.com/compose-spec/compose-spec v0.0.0-20231121152139-478928e7c9f8/go.mod h1:SkxF2HOKkzpp+mNdvGONzi3g2shI/M94GkMgsE0JLuk= -github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/containerd/containerd/v2 v2.1.3 h1:eMD2SLcIQPdMlnlNF6fatlrlRLAeDaiGPGwmRKLZKNs= +github.com/containerd/containerd/v2 v2.1.3/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= +github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= +github.com/containerd/stargz-snapshotter v0.16.3 h1:zbQMm8dRuPHEOD4OqAYGajJJUwCeUzt4j7w9Iaw58u4= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8= -github.com/distribution/distribution v2.8.2+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc= -github.com/distribution/distribution v2.8.3+incompatible h1:RlpEXBLq/WPXYvBYMDAmBX/SnhD67qwtvW/DzKc8pAo= -github.com/distribution/distribution v2.8.3+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc= -github.com/distribution/distribution v2.8.4-0.20231004140828-d607c6ccb937+incompatible h1:P+KGSuf9hFRNDeS5I8fnEkExK8W/GOr6mzn8aCkVzSU= -github.com/distribution/distribution v2.8.4-0.20231004140828-d607c6ccb937+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc= -github.com/distribution/reference v0.5.0 
h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/buildx v0.11.0 h1:DNCOIYT/7J0sPBlU/ozEhFd4MtbnbFByn45yeTMHXVU= -github.com/docker/buildx v0.11.0/go.mod h1:Yq7ZNjrwXKzW0uSFMk46dl5Gl903k5+bp6U4apsM5rs= -github.com/docker/buildx v0.11.1 h1:xfmrAkOJrN+NLRcwhZn1iBgJVAK1dEBEv8lWu1Wxg14= -github.com/docker/buildx v0.11.1/go.mod h1:qAxs3bsJEfVo7DOc9riES/f9Z187CeGM5nLPmadk8AA= -github.com/docker/buildx v0.11.2 h1:R3p9F0gnI4FwvQ0p40UwdX1T4ugap4UWxY3TFHoP4Ws= -github.com/docker/buildx v0.11.2/go.mod h1:CWAABt10iIuGpleypA3103mplDfcGu0A2AvT03xfpTc= -github.com/docker/buildx v0.12.0 h1:pI4jr4SeH9oHa0SmMvH/lz+Rdqkg+dRa9H/1VXbYgws= -github.com/docker/buildx v0.12.0/go.mod h1:SBLnQH9q+77aVvpvS5LLIly9+nHVlwscl5GEegGMD5g= -github.com/docker/buildx v0.12.1-0.20231214091505-b68ee824c673 h1:mZ2+TyEERNA4GY2xO3kIa7ZhfmUNwveIMxGYWV126dA= -github.com/docker/buildx v0.12.1-0.20231214091505-b68ee824c673/go.mod h1:SBLnQH9q+77aVvpvS5LLIly9+nHVlwscl5GEegGMD5g= -github.com/docker/buildx v0.12.1 h1:oahmdKmkGaa8NnaWKvtDZe2vpSYsKZ+WsHOMLQTDCk8= -github.com/docker/buildx v0.12.1/go.mod h1:SBLnQH9q+77aVvpvS5LLIly9+nHVlwscl5GEegGMD5g= -github.com/docker/buildx v0.12.2-0.20240126114058-d43cf8c2c6b4 h1:aEFpoxTw5LIRjN0jFdHsOa1POpELzgwJ0SnhykCa8dg= -github.com/docker/buildx v0.12.2-0.20240126114058-d43cf8c2c6b4/go.mod h1:SBLnQH9q+77aVvpvS5LLIly9+nHVlwscl5GEegGMD5g= -github.com/docker/buildx v0.12.2-0.20240220084849-89154c7d3303 h1:J34paheV5gSKezhnVzwT5WwRQgPzBLYAdCYAFPoEYfU= -github.com/docker/buildx v0.12.2-0.20240220084849-89154c7d3303/go.mod h1:OoLv85M5U/p8TWyCINtEilyy0A0XTN9COQgmuE0bWhw= -github.com/docker/buildx v0.13.0 h1:nNbkgaxsWEZPX1P8yXN6dibAv7ADRMVqi0aohDFhLJY= -github.com/docker/buildx v0.13.0/go.mod h1:f2n6vggoX4sNNZ0XoRZ0Wtv6J1/rbDTabgdHtpW9NNM= -github.com/docker/buildx v0.13.1-0.20240307093612-37b7ad1465d2 h1:kuFvsZyZCYqxWBc3O7B95wHAoYKheuZYztIHstwnF7Y= -github.com/docker/buildx v0.13.1-0.20240307093612-37b7ad1465d2/go.mod h1:f2n6vggoX4sNNZ0XoRZ0Wtv6J1/rbDTabgdHtpW9NNM= -github.com/docker/buildx v0.13.1 h1:uZjBcb477zh02tnHk0rqNV/DZOxbf/OiHw6Mc8OhDYU= -github.com/docker/buildx v0.13.1/go.mod h1:f2n6vggoX4sNNZ0XoRZ0Wtv6J1/rbDTabgdHtpW9NNM= -github.com/docker/buildx v0.14.0 h1:FxqcfE7xgeEC4oQlKLpuvfobRDVDXrHE3jByM+mdyqk= -github.com/docker/buildx v0.14.0/go.mod h1:Vy/2lC9QsJvo33+7KKkN/GDE5WxnVqW0/dpcN7ZqPJY= -github.com/docker/buildx v0.14.1 h1:Pr3HdtHoDsCghlIExgGp0WOIgvbiViushOKIPUIyFI4= -github.com/docker/buildx v0.14.1/go.mod h1:s6xxLYXZIWnkdYpSvxRmoqZTb1vViV9q2f+Hg8cWA3Y= -github.com/docker/buildx v0.15.0 h1:PVq4IMnTvw1Sx0RKDWbfi2eTGawFd9CMBYnz9xat93Y= -github.com/docker/buildx v0.15.0/go.mod h1:AdkB1RIcU4rfZ6mpw2PA2pOi1ppI9yvFXkVEpq5EmS4= -github.com/docker/buildx v0.15.1 h1:1cO6JIc0rOoC8tlxfXoh1HH1uxaNvYH1q7J7kv5enhw= -github.com/docker/buildx v0.15.1/go.mod h1:16DQgJqoggmadc1UhLaUTPqKtR+PlByN/kyXFdkhFCo= -github.com/docker/buildx v0.16.0 h1:LurEflyb6BBoLtDwJY1dw9dLHKzEgGvCjAz67QI0xO0= -github.com/docker/buildx v0.16.0/go.mod h1:4xduW7BOJ2B11AyORKZFDKjF6Vcb4EgTYnV2nunxv9I= -github.com/docker/buildx v0.16.2 h1:SPcyEiiCZEntJQ+V0lJI8ZudUrki2v1qUqmC/NqxDDs= -github.com/docker/buildx v0.16.2/go.mod h1:by+CuE4Q+2NvECkIhNcWe89jjbHADCrDlzS9MRgbv2k= -github.com/docker/buildx v0.17.0 h1:Z+QQxsJJPldaeU/4aNXoudFwDDK0/ALFYmDcP5q5fiY= -github.com/docker/buildx v0.17.0/go.mod h1:sBKkoZFs+R2D6ARyQ4/GE/FQHHFsl9PkHdvv/GXAsMo= -github.com/docker/buildx v0.17.1 h1:9ob2jGp4+W9PxWw68GsoNFp+eYFc7eUoRL9VljLCSM4= 
-github.com/docker/buildx v0.17.1/go.mod h1:kJOhOhS47LRvrLFRulFiO5SE6VJf54yYMn7DzjgO5W0= -github.com/docker/buildx v0.18.0 h1:rSauXHeJt90NvtXrLK5J992Eb0UPJZs2vV3u1zTf1nE= -github.com/docker/buildx v0.18.0/go.mod h1:JGNSshOhHs5FhG3u51jXUf4lLOeD2QBIlJ2vaRB67p4= -github.com/docker/buildx v0.19.1 h1:muQEvRJLvOCS0p/67gPwjnQPWqE5ot3sGkb2Eq7vCmw= -github.com/docker/buildx v0.19.1/go.mod h1:k4WP+XmGRYL0a7l4RZAI2TqpwhuAuSQ5U/rosRgFmAA= -github.com/docker/buildx v0.19.2 h1:2zXzgP2liQKgQ5BiOqMc+wz7hfWgAIMWw5MR6QDG++I= -github.com/docker/buildx v0.19.2/go.mod h1:k4WP+XmGRYL0a7l4RZAI2TqpwhuAuSQ5U/rosRgFmAA= -github.com/docker/buildx v0.20.0 h1:XM2EvwEfohbxLPAheVm03biNHpspB/dA6U9F0c6yJsI= -github.com/docker/buildx v0.20.0/go.mod h1:VVi4Nvo4jd/IkRvwyExbIyW7u82fivK61MRx5I0oKic= -github.com/docker/cli v24.0.2+incompatible h1:QdqR7znue1mtkXIJ+ruQMGQhpw2JzMJLRXp6zpzF6tM= -github.com/docker/cli v24.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.4+incompatible h1:Y3bYF9ekNTm2VFz5U/0BlMdJy73D+Y1iAAZ8l63Ydzw= -github.com/docker/cli v24.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.5+incompatible h1:WeBimjvS0eKdH4Ygx+ihVq1Q++xg36M/rMi4aXAvodc= -github.com/docker/cli v24.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= -github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.8-0.20231106123152-48ec4f339e2b+incompatible h1:tXZk7C97vINae9YYzPtqoClal32VoMl8gz6gLjJ6Kdg= -github.com/docker/cli v24.0.8-0.20231106123152-48ec4f339e2b+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.8-0.20231211210310-fb2f337bc1b1+incompatible h1:3hcKPFXxdqoVzoIrjNF1NNb36kzFiEimwUegOhw0pC0= -github.com/docker/cli v24.0.8-0.20231211210310-fb2f337bc1b1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.8-0.20231213094340-0f82fd88610a+incompatible h1:OSAexdrbbGYSMOdaskqxEQR9N3CNv59ypbhD032P1TI= -github.com/docker/cli v24.0.8-0.20231213094340-0f82fd88610a+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v24.0.8-0.20240103162225-b0c5946ba5d8+incompatible h1:hHBH6X0fAXobxcFmnrqiiUB+I0WrQ+65pjMZGW7p8h8= -github.com/docker/cli v24.0.8-0.20240103162225-b0c5946ba5d8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v25.0.0-beta.1+incompatible h1:bJzIgR4mKNpceAwwi19SqZK0AbztMc3nQTgnvxxyY/A= -github.com/docker/cli v25.0.0-beta.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v25.0.1-0.20240119143135-01f933261885+incompatible h1:UkZcGfKrx1PUDTT/TEzeYpyeRvNVbNqsj01yasxHuvA= -github.com/docker/cli v25.0.1-0.20240119143135-01f933261885+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= -github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v25.0.4-0.20240221083216-f67e569a8fb9+incompatible h1:crlBDc5Kfph4aUtWf9Rz+BtcNdB17bE3NLU+3+WuAaw= -github.com/docker/cli v25.0.4-0.20240221083216-f67e569a8fb9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA= -github.com/docker/cli 
v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= -github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v26.1.0+incompatible h1:+nwRy8Ocd8cYNQ60mozDDICICD8aoFGtlPXifX/UQ3Y= -github.com/docker/cli v26.1.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v26.1.3-0.20240513184838-60f2d38d5341+incompatible h1:9bTMRZTbwJvSrosCeCWS9o9cxtBxxpwOiwlrJZwSWb8= -github.com/docker/cli v26.1.3-0.20240513184838-60f2d38d5341+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.0.1+incompatible h1:d/OrlblkOTkhJ1IaAGD1bLgUBtFQC/oP0VjkFMIN+B0= -github.com/docker/cli v27.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= -github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE= -github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.2.0+incompatible h1:yHD1QEB1/0vr5eBNpu8tncu8gWxg8EydFPOSKHzXSMM= -github.com/docker/cli v27.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.2.2-0.20240909090509-65decb573126+incompatible h1:if+XpfWkGSpLf8NtVlYgvCeVvKW4Eba90LispMGC50M= -github.com/docker/cli v27.2.2-0.20240909090509-65decb573126+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.3.0+incompatible h1:h7J5eiGdUbH2Q4EcGr1mFb20qzS7Nrot3EI9hwycpK0= -github.com/docker/cli v27.3.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ= -github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.3.2-0.20241107125754-eb986ae71b0c+incompatible h1:KqHavmeo5+ct30Z2UKxbVhUEfuzI9JZFgPdVVaWS4Uc= -github.com/docker/cli v27.3.2-0.20241107125754-eb986ae71b0c+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.4.0+incompatible h1:/nJzWkcI1MDMN+U+px/YXnQWJqnu4J+QKGTfD6ptiTc= -github.com/docker/cli v27.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM= -github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/compose-cli v1.0.35 h1:uZyEHLalfqBS2PiTpA1LAULyJmuQ+YtZg7nG4Xl3/Cc= -github.com/docker/compose-cli v1.0.35/go.mod h1:mSXI4hFLpRU3EtI8NTo32bNwI0UXSr8jnq+/rYjGAUU= -github.com/docker/compose/v2 v2.22.0 h1:3rRz4L7tPU75wRsV8JZh2/aTgerQvPa1cpzZN+tHqUY= -github.com/docker/compose/v2 v2.22.0/go.mod h1:W+OVmnkJP0a62v8KnjtpXS0qrOdLnrxGJmKEU2dD4IQ= -github.com/docker/compose/v2 v2.23.0 h1:OX1MiAUn8LSfW0F3yOhUYnKZhnSj9qy29fSJn3tT3h4= -github.com/docker/compose/v2 v2.23.0/go.mod h1:548Y4k6qPQXdpouRp3EPx76k/ATYU5nrxULPSOsMM9U= -github.com/docker/compose/v2 v2.23.1 h1:wLgblcBfAgbXeNaxxKAIL//I+xgoyZ1BBbWFNfoLQ3I= -github.com/docker/compose/v2 v2.23.1/go.mod h1:FCqosV9Gc3/QOoRgYSjVnweNqDyr6LsAyLca5VPrEGU= -github.com/docker/compose/v2 v2.23.3 h1:2p2biZqpUvPzC8J7nDl+ankVQNCCAk2IZJ4eg1S+6BE= 
-github.com/docker/compose/v2 v2.23.3/go.mod h1:lUweVMN13YR0a9M7qdKulTSMS1kYdAysYNJlFEnDMCw= -github.com/docker/compose/v2 v2.24.0 h1:Gvmg3E5/Rqa4G340sYcUk/DIegT5Nod2ZV3MqR248j8= -github.com/docker/compose/v2 v2.24.0/go.mod h1:sDypMTKq6Mrp0W5NZ6+uiqxR9zEukI1RVuFRqwBTljs= -github.com/docker/compose/v2 v2.24.1 h1:Mk14AOkxetMKrWb1bOnx7bEfS+v/moaCZnU69QqUw6A= -github.com/docker/compose/v2 v2.24.1/go.mod h1:rrqu0bPBN/HD2wRSNwVN+V9SDfhVQnKxF1DP9B9WOdI= -github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47 h1:4vvuN0itjUryASt/WgrrLt7Tat7L53Ovt6Y1tLmVZPQ= -github.com/docker/compose/v2 v2.24.2-0.20240119115212-388169011f47/go.mod h1:YMMi6kNJdi3gELhMyhdnZinMiZvSWoyAl6i7XoeqFDg= -github.com/docker/compose/v2 v2.24.2 h1:uxH1Be7w/T2eozzgcftsTuOVOsxsOruJwJFLu6wsC6I= -github.com/docker/compose/v2 v2.24.2/go.mod h1:0PmmvCX+jo4kCp9JJxFY/Za7nCPwSzzRcX/g8y2gXTA= -github.com/docker/compose/v2 v2.24.4 h1:uBlpNmv27Gd9vlExUkQcgmLuNYlGloxX6yCt7Oau8vk= -github.com/docker/compose/v2 v2.24.4/go.mod h1:409UYNwh+eoKY5UST4ORZMuMNj+nBUuEZC3V+WF8CqA= -github.com/docker/compose/v2 v2.24.5 h1:7K173fhy+ghA88C8ib5YNa+kAZCx0CBeGW7lHcdoPZw= -github.com/docker/compose/v2 v2.24.5/go.mod h1:gg+RsqCXYD/TOIJgya4N9mtj/UmFJGEls7y3h/kadVE= -github.com/docker/compose/v2 v2.24.6 h1:V5fOXgga0hYy4wHsygCquO6/k++8q3WuckU7Qo1cnXk= -github.com/docker/compose/v2 v2.24.6/go.mod h1:ugV3/2KoKEeM98ZYF9vsYwnSExC4xLGxblAqXB6HUXQ= -github.com/docker/compose/v2 v2.24.7 h1:1WSo4CVf18tnGJMC6V78jYsAxSDD61ry6L3JwVT+8EI= -github.com/docker/compose/v2 v2.24.7/go.mod h1:7U3QbXdRJfBylTgkdlrjOg8hWLZqM09mof9DVZ5Fh4E= -github.com/docker/compose/v2 v2.25.0 h1:UMCrWFItKdXXrlbxvA63V3aFb4Nr3zmlSY2GvJIqJW0= -github.com/docker/compose/v2 v2.25.0/go.mod h1:M0PSYeTsp2ZEZJGhvzNTBtJeJRN7ZBGb4Ft1mUteTac= -github.com/docker/compose/v2 v2.26.1 h1:27fAR5jVzNUYwY/9ppIjrPqGYLW5HtOTq2aYGBMCtA0= -github.com/docker/compose/v2 v2.26.1/go.mod h1:5iVCMlr18ab0NlMxIPdtTgThTkzb34Z/zj15N7KSW+s= -github.com/docker/compose/v2 v2.27.0 h1:FKyClQdErCxUZULC2zo6Jn5ve+epFPe/Y0HaxjmUzNg= -github.com/docker/compose/v2 v2.27.0/go.mod h1:uaqwmY6haO8wXWHk+LAsqqDapX6boH4izRKqj/E7+Bo= -github.com/docker/compose/v2 v2.27.2 h1:8uvz019AZIPmw8+rnLBubAuyt9SEoU/pyCcAJPXMq0A= -github.com/docker/compose/v2 v2.27.2/go.mod h1:c6QJ/VVZrzVu97Ur1jylFLivvTkFdef1rSzmnQAO3DA= -github.com/docker/compose/v2 v2.28.0 h1:GSE/IC/cgWsv7KA5uoBrwVdHQbWS1R5XvvcEgoAQUOY= -github.com/docker/compose/v2 v2.28.0/go.mod h1:YDhEGZHuiXEqJu5Clc8N4si/CJIXHWU0Lf1ph9NxFYA= -github.com/docker/compose/v2 v2.28.1 h1:ORPfiVHrpnRQBDoC3F8JJyWAY8N5gWuo3FgwyivxFdM= -github.com/docker/compose/v2 v2.28.1/go.mod h1:wDtGQFHe99sPLCHXeVbCkc+Wsl4Y/2ZxiAJa/nga6rA= -github.com/docker/compose/v2 v2.29.0 h1:qPBhzfjT2zkxUXuu+TcbQq292bPpB0ozzVHot2w2IN0= -github.com/docker/compose/v2 v2.29.0/go.mod h1:95QFO8lue3WJmLUDSdOLBkm7KdGhcG6U+RvVxrQIzOo= -github.com/docker/compose/v2 v2.29.2 h1:gRlR2ApZ0IGcwmSUb/wlEVCk18Az8b7zl03hJArldOg= -github.com/docker/compose/v2 v2.29.2/go.mod h1:U+yqqZqYPhILehkmmir+Yh7ZhCfkKqAvaZdrM47JBRs= -github.com/docker/compose/v2 v2.30.0 h1:EjtEBeIPeqzlY3DUQhdjkiMwigX8TrUrgPAyAqey1d0= -github.com/docker/compose/v2 v2.30.0/go.mod h1:WlU5gYgsYfNLuDeUdTusvutEC5PV3sDc15aClbR5lPw= -github.com/docker/compose/v2 v2.30.1 h1:AwDaEcmgskxaI75Wjt3KL6/Xqq/GXKUQcBpo/RqMEkw= -github.com/docker/compose/v2 v2.30.1/go.mod h1:pt/uv8KQ6VaM0IbHZwB1UdwDIs9PB4nN4LoWst+dqXc= -github.com/docker/compose/v2 v2.30.2 h1:7PypFsyl5wjlSeOyx3LCb8XMcAGkb+D0fqM47OIKe8I= -github.com/docker/compose/v2 v2.30.2/go.mod h1:ND4+yaNoJ3Jh1OgrEO64uzMq/VKRqBkMS8zpb65Fve8= 
-github.com/docker/compose/v2 v2.30.3 h1:e8H7xGLCZOeFo46GEtyDGHlkBbNgXqbXKIXPOSL8cfU= -github.com/docker/compose/v2 v2.30.3/go.mod h1:ayPsSsRSc5WpVFehPrTDFuljAydxaf8g0aM9UKbaMXk= -github.com/docker/compose/v2 v2.31.0 h1:8Sm0c4MjIhksguxIA5koYMXoTJDAp/CaZ1cdZrMvMdw= -github.com/docker/compose/v2 v2.31.0/go.mod h1:oQq3UDEdsnB3AUO72AxaoeLbkCgmUu1+8tLzvmphmXA= -github.com/docker/compose/v2 v2.32.2/go.mod h1:fcK4rrf1bm8pfDsYdZIR+l4RSk9j6HVtBvJKGYyXsZ4= -github.com/docker/compose/v2 v2.32.3 h1:7KKVpTudYUrqs9GueTnJ+N6qnnzI2bqmANq6kXfmuv8= -github.com/docker/compose/v2 v2.32.3/go.mod h1:fcK4rrf1bm8pfDsYdZIR+l4RSk9j6HVtBvJKGYyXsZ4= -github.com/docker/compose/v2 v2.32.4 h1:h1I7GlJ1NCXKqM0nCVVsMrD/7TdIG48HNgRufcBF1KQ= -github.com/docker/compose/v2 v2.32.4/go.mod h1:fcK4rrf1bm8pfDsYdZIR+l4RSk9j6HVtBvJKGYyXsZ4= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/buildx v0.24.0 h1:qiD+xktY+Fs3R79oz8M+7pbhip78qGLx6LBuVmyb+64= +github.com/docker/buildx v0.24.0/go.mod h1:vYkdBUBjFo/i5vUE0mkajGlk03gE0T/HaGXXhgIxo8E= +github.com/docker/buildx v0.25.0 h1:qs5WxBo0wQKSXcQ+v6UhWaeM2Pu+95ZCymaimRzInaE= +github.com/docker/buildx v0.25.0/go.mod h1:xJcOeBhz49tgqN174MMGuOU4bxNmgfaLnZn7Gm641EE= +github.com/docker/cli v28.2.1+incompatible h1:AYyTcuwvhl9dXdyCiXlOGXiIqSNYzTmaDNpxIISPGsM= +github.com/docker/cli v28.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.0+incompatible h1:s+ttruVLhB5ayeuf2BciwDVxYdKi+RoUlxmwNHV3Vfo= +github.com/docker/cli v28.3.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.3-0.20250711132746-c69d8bde4adc+incompatible h1:5I561JBDi4n0RKxYNkUVc9xiLnlCfjjm31XRV0r3o98= +github.com/docker/cli v28.3.3-0.20250711132746-c69d8bde4adc+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/compose/v2 v2.36.2 h1:rxk1PUUbhbAS6HkGsYo9xUmMBpKtVwFMNCQjE4+i5fk= +github.com/docker/compose/v2 v2.36.2/go.mod h1:mZygkne+MAMu/e1B28PBFmG0Z0WefbxZ/IpcjSFdrw8= +github.com/docker/compose/v2 v2.37.0 h1:R8Yik9ssiRz7T9BRfdOZy0xHDzOFPIJX40DrxzJ62dQ= +github.com/docker/compose/v2 v2.37.0/go.mod h1:twDoqUBFO2L5+vccJjkR6shQOH8C50V8AAQPxlkFr2Q= +github.com/docker/compose/v2 v2.37.1 h1:d/LO338bB7jxHvQwVHSBAjIgitDK2+Dl5IXJImL/bAA= +github.com/docker/compose/v2 v2.37.1/go.mod h1:yyprfHgPYV+ydOoL1gp8nIIlZ730ughSvz8D1VamayU= +github.com/docker/compose/v2 v2.37.3 h1:RKaTVsWmqvJd6GP9EWPZ6fu4ezl8tfG1V7bRijToGJI= +github.com/docker/compose/v2 v2.37.3/go.mod h1:U5PKGy7r7M7u2oVhz41NzlNglJFCdMrrThBOH5x00hk= +github.com/docker/compose/v2 v2.38.1 h1:UNbMX6UbpdvdW3xjljD05fcL4jQmHGLh7y9VDgPp29Q= +github.com/docker/compose/v2 v2.38.1/go.mod h1:0Jn/JGDGghZ9JlhY4DJY2/Cs15EDIH2nnagA66Lu3Dw= +github.com/docker/compose/v2 v2.38.2 h1:yY3jocdj1JkHbSgAyGaDLAh8fKOykZ8LVPitNOyo9/0= +github.com/docker/compose/v2 v2.38.2/go.mod h1:zigTfE9jJq/wFZPUCbNJtOC2YpSXBn0d3Xm/8EOs3Rk= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= -github.com/docker/go-connections v0.4.0/go.mod 
h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/scout-cli v0.16.1 h1:kIcWkaA+cEwnC0nIjNlc8dnlXbV4D8Vyu3yrRUPB9xs= -github.com/docker/scout-cli v0.16.1/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.16.2-0.20230623100159-43178dbabf53 h1:FA7tj4Bnu+jFJdwB5D6CCGTYvLfNkjGqjvvZmg3fcLc= -github.com/docker/scout-cli v0.16.2-0.20230623100159-43178dbabf53/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.17.0 h1:fYFEMqhjVxbbKu48Djogwrn/lB1D/CJqnq/gDYfSs38= -github.com/docker/scout-cli v0.17.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.19.0 h1:PGpAqercDHC4M0KsQwP+txk0sG+VVB23njuFurOf8Vw= -github.com/docker/scout-cli v0.19.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.20.0 h1:+rj/uckRFs8vzQSxcWsVU4v1IiyWDcPvj8bMOxxYutI= -github.com/docker/scout-cli v0.20.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.22.3 h1:STf1Oq0+PSVaWEwNZ9UleHTEZ0JUP1py6eQaRK0qivA= -github.com/docker/scout-cli v0.22.3/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.23.0 h1:oFCNiO+11WhcnsXj3MF6uqJD49oBPMdJ3Pdq9XfZa4I= -github.com/docker/scout-cli v0.23.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.23.3 h1:ToQ/Gw1clQ2GJ47Yt0HCefJB55oPOHZYH6rVxGdfF7Y= -github.com/docker/scout-cli v0.23.3/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v0.24.1 h1:ga1J6dsKXfhBQ98wKbb+GWncuMdqErxhpLMxPSMqH+g= -github.com/docker/scout-cli v0.24.1/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.0.2 h1:KweJ2S/WXncRIv+9+GrNI4bq/5TjcWY8WyWqgfV1zdM= -github.com/docker/scout-cli v1.0.2/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.0.9 h1:P2Rs+HhVOIoSJZ1fcVuSDaxvV/8dCJTFdb3shrQtj5E= -github.com/docker/scout-cli v1.0.9/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.2.0 h1:cjtsf7s2f6NO9OxgXWPW3DGxaTKVU58JKmVtaVMc0RA= -github.com/docker/scout-cli v1.2.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.3.0 h1:mL9y1yB/DR/dAar71z0w8u8et9o2272Mrjxtb59ds3M= -github.com/docker/scout-cli v1.3.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.4.1 h1:jRHO3LI3x2eMrvObKC6uadoRATbwZSXm1NafSzo9Cu4= -github.com/docker/scout-cli v1.4.1/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.6.0 h1:07Kn2d/AshUSUk64ArZzE31lj4h7waGi8tjrFXxMZLY= -github.com/docker/scout-cli v1.6.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.7.0 h1:2dEbQKqkxM6wsJab/Ma3EJacS9ZrkVs1C4KbjXggJjY= -github.com/docker/scout-cli v1.7.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.8.0 h1:rxwU9Xzt1LhqSY37ZVe/GPRCQxrEaQNipOMpCrUdGns= -github.com/docker/scout-cli v1.8.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.9.3 h1:u3lKQ7A1EvT3qNe5lR2c8dTNcAGIoSmH8HvSYarLlJY= -github.com/docker/scout-cli 
v1.9.3/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.10.0 h1:C8Gm+6Oc7NqhtZ/UoACv3N2LaP1jqkhlIDRhBOqMBng= -github.com/docker/scout-cli v1.10.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.11.0 h1:I310kNhjw3oeKe8T1cQEh6yPgy6VtpuwzjWchETn8KU= -github.com/docker/scout-cli v1.11.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.12.0 h1:NhmT4BzL2lYiIk5hPFvK5FzQ8izbLDL3/Rugcyulv1M= -github.com/docker/scout-cli v1.12.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/docker/scout-cli v1.13.0 h1:RThUM56yooV5izqgMEYQS+a6Yx+vGmZofJwX0qjgkco= -github.com/docker/scout-cli v1.13.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ= +github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.1+incompatible h1:20+BmuA9FXlCX4ByQ0vYJcUEnOmRM6XljDnFWR+jCyY= +github.com/docker/docker v28.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/model-cli v0.1.26-0.20250527144806-15d0078a3c01 h1:UL/07fs6IEdRWWkK+GRvmSTZM+9fugWzEeo2vdGIPoE= +github.com/docker/model-cli v0.1.26-0.20250527144806-15d0078a3c01/go.mod h1:1YlKTiA19vEhbmM8EiJVPUFvRifBBI1S3sBpOt6Gbl4= +github.com/docker/model-cli v0.1.26-0.20250529165100-f4b458125149 h1:uOLJ8d/isN/mqvr5rEFHVL3pBAWvnjfEEcvPLHJ2JSI= +github.com/docker/model-cli v0.1.26-0.20250529165100-f4b458125149/go.mod h1:1YlKTiA19vEhbmM8EiJVPUFvRifBBI1S3sBpOt6Gbl4= +github.com/docker/model-cli v0.1.32 h1:iBYi2SS8ubv18wbhN04cKlds6Bc7VoEKJ11S+R0eFGo= +github.com/docker/model-cli v0.1.32/go.mod h1:2w/B+oBs0aEPbmfdGM+NKy/HURJGDAzECTIKiRaj5Rg= +github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2 h1:gYGGGdufX1uPWCYUDYO05nKWKBsJxvwvYlxMT0Yk74Y= +github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2/go.mod h1:2w/B+oBs0aEPbmfdGM+NKy/HURJGDAzECTIKiRaj5Rg= +github.com/docker/model-distribution v0.0.0-20250512190053-b3792c042d57 h1:ZqfKknb+0/uJid8XLFwSl/osjE+WuS6o6I3dh3ZqO4U= +github.com/docker/model-distribution v0.0.0-20250512190053-b3792c042d57/go.mod h1:dThpO9JoG5Px3i+rTluAeZcqLGw8C0qepuEL4gL2o/c= +github.com/docker/model-distribution v0.0.0-20250627163720-aff34abcf3e0 h1:bve4JZI06Admw+NewtPfrpJXsvRnGKTQvBOEICNC1C0= +github.com/docker/model-distribution v0.0.0-20250627163720-aff34abcf3e0/go.mod h1:dThpO9JoG5Px3i+rTluAeZcqLGw8C0qepuEL4gL2o/c= +github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88 h1:NkiizYL67HsCnnlEU6BQVoeiC1bAAyJFxw02bO7JC4E= 
+github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88/go.mod h1:Nw+rx6RRPNdProEb9/BVJyAQn63px6WWlOv+eEpkV7Q= +github.com/docker/model-runner v0.0.0-20250627142917-26a0a73fbbc0 h1:yajuhlGe1xhpWW3eMehQi2RrqiBQiGoi6c6OWiPxMaQ= +github.com/docker/model-runner v0.0.0-20250627142917-26a0a73fbbc0/go.mod h1:vZJiUZH/7O1CyNsEGi1o4khUT4DVRjcwluuamU9fhuM= github.com/docker/scout-cli v1.15.0 h1:VhA9niVftEyZ9f5KGwKnrSfQOp2X3uIU3VbE/gTVMTM= github.com/docker/scout-cli v1.15.0/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= -github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/docker/scout-cli v1.18.1 h1:snFodhV6xFJryxdUZ0ukPZFZZFnWAGLUuuPZGB3BOK8= +github.com/docker/scout-cli v1.18.1/go.mod h1:Eo1RyCJsx3ldz/YTY5yGxu9g9mwTYbRUutxQUkow3Fc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
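These sums mirror the site's go.mod, which Hugo Modules uses to pull CLI reference data out of the product repositories (the hugo.yaml hunk later in this diff adds a model-cli mount alongside the existing compose-cli and scout-cli ones). A minimal sketch of what the matching require entries might look like, with versions taken from added lines in this file; the module path and grouping are assumptions, not the actual file:

    // go.mod (sketch only; real layout may differ)
    module github.com/docker/docs

    go 1.24.0

    require (
        github.com/docker/buildx v0.25.0
        github.com/docker/compose/v2 v2.38.2
        github.com/docker/model-cli v0.1.33-0.20250703103301-d4e4936a9eb2
        github.com/docker/scout-cli v1.18.1
    )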
-github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA= -github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg= -github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= -github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gpustack/gguf-parser-go v0.14.1 h1:tmz2eTnSEFfE52V10FESqo9oAUquZ6JKQFntWC/wrEg= +github.com/gpustack/gguf-parser-go v0.14.1/go.mod h1:GvHh1Kvvq5ojCOsJ5UpwiJJmIjFw3Qk5cW7R+CZ3IJo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/henvic/httpretty v0.1.4 h1:Jo7uwIRWVFxkqOnErcoYfH90o3ddQyVrSANeS4cxYmU= +github.com/henvic/httpretty v0.1.4/go.mod h1:Dn60sQTZfbt2dYsdUSNsCljyF4AfdqnuJFDLJA1I4AM= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jaypipes/ghw v0.17.0 h1:EVLJeNcy5z6GK/Lqby0EhBpynZo+ayl8iJWY0kbEUJA= +github.com/jaypipes/ghw v0.17.0/go.mod h1:In8SsaDqlb1oTyrbmTC14uy+fbBMvp+xdqX51MidlD8= +github.com/jaypipes/pcidb v1.0.1 h1:WB2zh27T3nwg8AE8ei81sNRb9yWBii3JGNJtT7K9Oic= +github.com/jaypipes/pcidb v1.0.1/go.mod h1:6xYUz/yYEyOkIkUt2t2J2folIuZ4Yg6uByCGFXMCeE4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/moby/buildkit v0.11.6 h1:VYNdoKk5TVxN7k4RvZgdeM4GOyRvIi4Z8MXOY7xvyUs= -github.com/moby/buildkit v0.11.6/go.mod h1:GCqKfHhz+pddzfgaR7WmHVEE3nKKZMMDPpK8mh3ZLv4= -github.com/moby/buildkit v0.12.0 h1:hgPDVSeondFLb28cBtRR5O0N4t8uWGJ4YNukT2aICIs= -github.com/moby/buildkit v0.12.0/go.mod h1:+n9GmkxwBCjVz4u7wmiyh+oqvjIjQM+1zk3iJrWfdos= -github.com/moby/buildkit v0.12.1-0.20230717122532-faa0cc7da353 h1:/ZIwqvOF3QKObJbjX96xVvAKtnWdw/AuEqysbbujaZA= -github.com/moby/buildkit v0.12.1-0.20230717122532-faa0cc7da353/go.mod h1:+n9GmkxwBCjVz4u7wmiyh+oqvjIjQM+1zk3iJrWfdos= -github.com/moby/buildkit v0.12.1-0.20230824004934-4376f3861b05 h1:oXcA1w1cswNzFW5TH5QoaAJ2zskZlFNsL8IHo28G3Os= -github.com/moby/buildkit v0.12.1-0.20230824004934-4376f3861b05/go.mod h1:BIvNtlrvok2xTC734ZNhQVGayvMB1Dz8bFuArWTLnnM= -github.com/moby/buildkit v0.12.1-0.20230830200556-05eb7287534b h1:VzIGQGWGnrDbzcQSJ28qTUAbNEtmszzuhUrzoqE/52Q= -github.com/moby/buildkit v0.12.1-0.20230830200556-05eb7287534b/go.mod h1:7/l0VKIyp1hBcGZF2hRpfBgvc0beQ9/hBWw7S+1JM0s= -github.com/moby/buildkit v0.12.1 h1:vvMG7EZYCiQZpTtXQkvyeyj7HzT1JHhDWj+/aiGIzLM= -github.com/moby/buildkit v0.12.1/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI= -github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc= -github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI= -github.com/moby/buildkit v0.13.0-beta1.0.20231011042751-9ef1ed946118 h1:pqpcLt3wJTqBEY8Va3QQvd+taaBTEmK2+1kV7LKc69k= -github.com/moby/buildkit v0.13.0-beta1.0.20231011042751-9ef1ed946118/go.mod h1:oSHnUZH7sNtAFLyeN1syf46SuzMThKsCQaioNEqJVUk= -github.com/moby/buildkit v0.13.0-beta1.0.20231011101155-c444964c2e8f h1:CEiXZq08D7vLOnEDl7XY95zbupdWOJrRLb1VeZ+Hxq8= -github.com/moby/buildkit v0.13.0-beta1.0.20231011101155-c444964c2e8f/go.mod h1:oSHnUZH7sNtAFLyeN1syf46SuzMThKsCQaioNEqJVUk= -github.com/moby/buildkit v0.13.0-beta1.0.20231113205014-1efcd30d9dd6 h1:gfbjHMadWpzz9Jbbo4l73lrkNrP2YvNsKIIg8e5Ra4s= -github.com/moby/buildkit v0.13.0-beta1.0.20231113205014-1efcd30d9dd6/go.mod h1:VE6gCOYRW2hbxnxtt7udKkYMF73YdvkgIrGhkB0EiDA= -github.com/moby/buildkit v0.13.0-beta1.0.20231214000015-a960fe501f00 h1:Ymp+x/hsr6M6R+6j4XVyGaRrhAt1MnGXoN+ZkQ+TuuA= -github.com/moby/buildkit v0.13.0-beta1.0.20231214000015-a960fe501f00/go.mod h1:6MddWPSL5jxy+W8eMMHWDOfZzzRRKWXPZqajw72YHBc= -github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 h1:r80LLQ91uOLxU1ElAvrB1o8oBsph51lPzVnr7t2b200= -github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991/go.mod h1:6MddWPSL5jxy+W8eMMHWDOfZzzRRKWXPZqajw72YHBc= -github.com/moby/buildkit v0.13.0-beta1.0.20240116143623-28ce478b1fde h1:t6dpbzyD4GYAX3zlm0s0+uH8xxx2UqF9uW9zuFIr+vg= -github.com/moby/buildkit 
v0.13.0-beta1.0.20240116143623-28ce478b1fde/go.mod h1:NK6kY+05bXjxhEmtGEMAwvSJ19gagBukPz6N4FFzlNs= -github.com/moby/buildkit v0.13.0-beta1.0.20240126101002-6bd81372ad6f h1:weCt2sfZGVAeThzpVyv4ibC0oFfvSxtbiTE7W77wXpc= -github.com/moby/buildkit v0.13.0-beta1.0.20240126101002-6bd81372ad6f/go.mod h1:vEcIVw63dZyhTgbcyQWXlZrtrKnvFoSI8LhfV+Vj0Jg= -github.com/moby/buildkit v0.13.0-beta3.0.20240201135300-d906167d0b34 h1:9oIm9T7YyDxRAXvP7y605G3TZmPGZjFvRHbbMJcIDy8= -github.com/moby/buildkit v0.13.0-beta3.0.20240201135300-d906167d0b34/go.mod h1:tSWWhq1EDM0eB3ngMNDiH2hOOW9fXTyn2uXuOraCLlE= -github.com/moby/buildkit v0.13.0-rc3.0.20240307012628-5a4c2975457b h1:lMLGJ3ErbAa5eGsVj7CkmN/2ByyyUFs3abfX99+C4pA= -github.com/moby/buildkit v0.13.0-rc3.0.20240307012628-5a4c2975457b/go.mod h1:P5zIr3pyh1VQoK751o5JFtogepVcLi9+77PTfmvJwls= -github.com/moby/buildkit v0.13.0-rc3.0.20240307092343-22d4212fed7e h1:lEQehVlOgEMJ6bZvx3TWFjFE9Cic4fWJplNNQtYUX/A= -github.com/moby/buildkit v0.13.0-rc3.0.20240307092343-22d4212fed7e/go.mod h1:P5zIr3pyh1VQoK751o5JFtogepVcLi9+77PTfmvJwls= -github.com/moby/buildkit v0.13.0-rc3.0.20240308080452-a38011b9f57d h1:q8sI5enL3NBniNUIeVyrbUj6WCSc0gg+tAQgX1m6oTM= -github.com/moby/buildkit v0.13.0-rc3.0.20240308080452-a38011b9f57d/go.mod h1:P5zIr3pyh1VQoK751o5JFtogepVcLi9+77PTfmvJwls= -github.com/moby/buildkit v0.13.0-rc3.0.20240402103816-7cd12732690e h1:+hA09x+9xK3KoXtxNFHiJxBbJrpVr/7UR221F2+pG9w= -github.com/moby/buildkit v0.13.0-rc3.0.20240402103816-7cd12732690e/go.mod h1:ij4XbVmcwOPQdTJQeO6341hqzvlw10kkuSsT36suSrk= -github.com/moby/buildkit v0.13.0-rc3.0.20240424175633-5fce077ed0e0 h1:wTJCJDC1woYunMCVd4qKvfJ4esNPYNBIW1459+FR1cA= -github.com/moby/buildkit v0.13.0-rc3.0.20240424175633-5fce077ed0e0/go.mod h1:wH5RTVyFjMQ67euC1e3UUSw7yQe7JkAHmf8OZkQY7Y4= -github.com/moby/buildkit v0.13.0 h1:reVR1Y+rbNIUQ9jf0Q1YZVH5a/nhOixZsl+HJ9qQEGI= -github.com/moby/buildkit v0.13.0/go.mod h1:aNmNQKLBFYAOFuzQjR3VA27/FijlvtBD1pjNwTSN37k= -github.com/moby/buildkit v0.14.0-rc2 h1:qvl0hOKeyAWReOkksNtstQjPNaAD4jN3Dvq4r7slqYM= -github.com/moby/buildkit v0.14.0-rc2/go.mod h1:/ZJNHNVso1nf063XlDhEkNEcRNW19utVpUKixCUo9Ks= -github.com/moby/buildkit v0.14.0-rc2.0.20240610193248-7da4d591c4dc h1:D/QzYP+52V4IzxMvcWe8ppgg0XptfI4/JCd7ry79gqY= -github.com/moby/buildkit v0.14.0-rc2.0.20240610193248-7da4d591c4dc/go.mod h1:1XssG7cAqv5Bz1xcGMxJL123iCv5TYN4Z/qf647gfuk= -github.com/moby/buildkit v0.14.0-rc2.0.20240611065153-eed17a45c62b h1:n06ACmuRYPZLR6DbQvVPDRGvqWK7gGCRJjMEzGTemzs= -github.com/moby/buildkit v0.14.0-rc2.0.20240611065153-eed17a45c62b/go.mod h1:1XssG7cAqv5Bz1xcGMxJL123iCv5TYN4Z/qf647gfuk= -github.com/moby/buildkit v0.15.1 h1:J6wrew7hphKqlq1wuu6yaUb/1Ra7gEzDAovylGztAKM= -github.com/moby/buildkit v0.15.1/go.mod h1:Yis8ZMUJTHX9XhH9zVyK2igqSHV3sxi3UN0uztZocZk= -github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE= -github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ= -github.com/moby/buildkit v0.17.0 h1:ZA/4AxwBbve1f3ZaNNJQiCBtTV62R6YweWNwq4A+sTc= -github.com/moby/buildkit v0.17.0/go.mod h1:ru8NFyDHD8HbuKaLXJIjK9nr3x6FZR+IWjtF07S+wdM= -github.com/moby/buildkit v0.17.1-0.20241031124041-354f2d13c905 h1:KMEmQThIQYXKvBurcvM+6zZjxP2CoNSsF/wUpW+RC/E= -github.com/moby/buildkit v0.17.1-0.20241031124041-354f2d13c905/go.mod h1:ru8NFyDHD8HbuKaLXJIjK9nr3x6FZR+IWjtF07S+wdM= -github.com/moby/buildkit v0.18.0 h1:KSelhNINJcNA3FCWBbGCytvicjP+kjU5kZlZhkTUkVo= -github.com/moby/buildkit v0.18.0/go.mod h1:vCR5CX8NGsPTthTg681+9kdmfvkvqJBXEv71GZe5msU= -github.com/moby/buildkit 
v0.19.0 h1:w9G1p7sArvCGNkpWstAqJfRQTXBKukMyMK1bsah1HNo= -github.com/moby/buildkit v0.19.0/go.mod h1:WiHBFTgWV8eB1AmPxIWsAlKjUACAwm3X/14xOV4VWew= +github.com/moby/buildkit v0.22.0 h1:aWN06w1YGSVN1XfeZbj2ZbgY+zi5xDAjEFI8Cy9fTjA= +github.com/moby/buildkit v0.22.0/go.mod h1:j4pP5hxiTWcz7xuTK2cyxQislHl/N2WWHzOy43DlLJw= +github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ= +github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/moby v24.0.2+incompatible h1:yH+5dRHH1x3XRKzl1THA2aGTy6CHYnkt5N924ADMax8= -github.com/moby/moby v24.0.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v24.0.4+incompatible h1:20Bf1sfJpspHMAUrxRFplG31Sriaw7Z9/jUEuJk6mqI= -github.com/moby/moby v24.0.4+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v24.0.5+incompatible h1:uUbydai/Y9J7Ybt+lFI3zBdnsMYXnXE9vEcfZDoEE8Q= -github.com/moby/moby v24.0.5+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible h1:bwE6hpc+Kq+UhTMUOdepQYXDBIqQENvj/LuuRJmTpAs= -github.com/moby/moby v24.0.8-0.20240109122856-854ca341c0f6+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v25.0.0+incompatible h1:KIFudkwXNK+kBrnCxWZNwhEf/jJzdjQAP7EF/awywMI= -github.com/moby/moby v25.0.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v25.0.3-0.20240203133757-341a7978a541+incompatible h1:0Vgi62q5Zo4E0wl1ZBj8bRq9rZeOGK+xwz1SBr3Naz8= -github.com/moby/moby v25.0.3-0.20240203133757-341a7978a541+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v25.0.4+incompatible h1:vea1J80wDM5x5geaZSaywFkfFxLABJIQ3mmR4ewZGbU= -github.com/moby/moby v25.0.4+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v26.0.0+incompatible h1:2n9/cIWkxiEI1VsWgTGgXhxIWUbv42PyxEP9L+RReC0= -github.com/moby/moby v26.0.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v26.1.0+incompatible h1:mjepCwMH0KpCgPvrXjqqyCeTCHgzO7p9TwZ2nQMI2qU= -github.com/moby/moby v26.1.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v26.1.2+incompatible h1:yOGzOkmMRtkhyySHHRH9dWOK/rlrmZR/cVnMGqlynzw= -github.com/moby/moby v26.1.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.0.1+incompatible h1:eWAkDZQMCcedwjpxh4hbDV/ktQG2QL41PuO7Bm4xWU4= -github.com/moby/moby v27.0.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.0.3+incompatible h1:lnUi7z7EFl1VkcahJOdvkI5QDEHJyib4CHbQK3MCQsw= -github.com/moby/moby v27.0.3+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.2.0+incompatible h1:WX2CjnXfZ8V87ugEIJuwVp7fDhHXCdi7gjlEQgcLE8I= -github.com/moby/moby v27.2.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.2.1+incompatible h1:mIRBoOsLr+Q6s+h65ZFyi6cXBEVy2RXCWS5HOHlxx54= -github.com/moby/moby 
v27.2.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.3.0+incompatible h1:AhSu/R7C5uiyd+JCts3kxrKyTzXa3FilBJ0KCLUHXqA= -github.com/moby/moby v27.3.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.3.1+incompatible h1:KQbXBjo7PavKpzIl7UkHT31y9lw/e71Uvrqhr4X+zMA= -github.com/moby/moby v27.3.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.4.0+incompatible h1:jGXXZCMAmFZS9pKsQqUt9yAPHOC450PM9lbQYPSQnuc= -github.com/moby/moby v27.4.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/moby v27.5.0+incompatible h1:RuYLppjLxMzWmPUQAy/hkJ6pGcXsuVdcmIVFqVPegO8= -github.com/moby/moby v27.5.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/moby v28.2.1+incompatible h1:UYmHExYP8S0uGKDozhYw7RJ+LpANL51g4fa3qT0Q2GA= +github.com/moby/moby v28.2.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v28.3.0+incompatible h1:BnZpCciB9dCnfNC+MerxqsHV4I6/gLiZIzzbRFJIhUY= +github.com/moby/moby v28.3.0+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/moby v28.3.2+incompatible h1:K0SaQiU3VJxzMmHarwIa9MUyYFYC6FzCf0Qs9oQaFI4= +github.com/moby/moby v28.3.2+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc3 
h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= -github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= -github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/theupdateframework/notary v0.6.1/go.mod h1:MOfgIfmox8s7/7fduvB2xyPPMJCrjRLRizA8OFwpnKY= -github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO4GY5a4pEaeEc= -gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= -gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8= +github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 
h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallnest/ringbuffer v0.0.0-20241116012123-461381446e3d h1:3VwvTjiRPA7cqtgOWddEL+JrcijMlXUmj99c/6YyZoY= +github.com/smallnest/ringbuffer v0.0.0-20241116012123-461381446e3d/go.mod h1:tAG61zBM1DYRaGIPloumExGvScf08oHuo0kFoOqdbT0= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk 
v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= +google.golang.org/genproto/googleapis/api v0.0.0-20250219182151-9fdb1cabc7b2 h1:35ZFtrCgaAjF7AFAK0+lRSf+4AyYnWRbH7og13p7rZ4= +google.golang.org/genproto/googleapis/api v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:W9ynFDP/shebLB1Hl/ESTOap2jHd6pmLXPNZC7SVDbA= 
+google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= +google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 h1:DMTIbak9GhdaSxEjvVzAeNZvyc03I61duqNbnm3SU0M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= diff --git a/hack/releaser/Dockerfile b/hack/releaser/Dockerfile index 11c574d173a0..90687cf448b7 100644 --- a/hack/releaser/Dockerfile +++ b/hack/releaser/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -ARG GO_VERSION=1.23 +ARG GO_VERSION=1.24 FROM scratch AS sitedir diff --git a/hack/releaser/go.mod b/hack/releaser/go.mod index d17c092280c4..0e1396febd74 100644 --- a/hack/releaser/go.mod +++ b/hack/releaser/go.mod @@ -1,6 +1,6 @@ module github.com/docker/docs/hack/releaser -go 1.22 +go 1.24.0 require ( github.com/alecthomas/kong v1.4.0 diff --git a/hugo.yaml b/hugo.yaml index 36c6795f4666..ac176ec6ad7e 100644 --- a/hugo.yaml +++ b/hugo.yaml @@ -4,6 +4,7 @@ refLinksErrorLevel: ERROR enableGitInfo: true disablePathToLower: true enableInlineShortcodes: true +disableHugoGeneratorInject: true ignoreLogs: - cascade-pattern-with-extension @@ -45,8 +46,8 @@ build: disableTags: true # Ensure that CSS/assets changes trigger a dev server rebuild cachebusters: - - source: assets/watching/hugo_stats\.json - target: styles\.css + - source: assets/notwatching/hugo_stats\.json + target: css - source: (postcss|tailwind)\.config\.js target: css - source: 
assets/.*\.js @@ -74,8 +75,22 @@ outputFormats: isPlainText: true mediaType: "text/plain" notAlternative: true + # Markdown for LLMs, see layouts/_default/single.markdown.md + Markdown: + baseName: index + mediaType: "text/markdown" + isPlainText: true + isHTML: false + permalinkable: false + # llms.txt + llms: + baseName: llms + isPlainText: true + mediaType: "text/plain" + notAlternative: true + permalinkable: false -# Enable custom output formats for the home page only +# Enable custom output formats # (only generate the custom output files once) outputs: home: @@ -83,6 +98,13 @@ outputs: - redirects - metadata - robots + - llms + page: + - html + - Markdown + section: + - html + - Markdown languages: en: @@ -111,33 +133,22 @@ params: # Use `grep` to figure out how they might be used. # Latest version of the Docker Engine API - latest_engine_api_version: "1.47" + latest_engine_api_version: "1.51" # Latest version of Docker Engine - docker_ce_version: "27.5.0" + docker_ce_version: "28.3.2" # Previous version of the Docker Engine # (Used to show e.g., "latest" and "latest"-1 in engine install examples - docker_ce_version_prev: "27.4.1" + docker_ce_version_prev: "28.3.1" # Latest Docker Compose version - compose_version: "v2.32.4" + compose_version: "v2.38.2" # Latest BuildKit version - buildkit_version: "0.16.0" + buildkit_version: "0.23.2" # Example runtime/library/os versions - example_go_version: "1.23" + example_go_version: "1.24" example_alpine_version: "3.21" example_node_version: "20" - # Minimum version thresholds (used together with the "introduced" shortcode - # See layouts/shortcodes/introduced.html - min_version_thresholds: - buildx: "0.10.0" - buildkit: "0.11.0" - engine: "24.0.0" - api: "1.41" - desktop: "4.20.0" - compose: "2.14.0" - scout: "1.0.0" - menus: # Site header menu main: @@ -267,8 +278,9 @@ module: - source: assets target: assets # Mount hugo_stats.json to the assets dir to trigger cachebust - - source: hugo_stats.json - target: assets/watching/hugo_stats.json + - disableWatch: true + source: hugo_stats.json + target: assets/notwatching/hugo_stats.json # Mount the icon files to assets so we can access them with resources.Get - source: node_modules/@material-symbols/svg-400/rounded target: assets/icons @@ -330,6 +342,13 @@ module: target: data/compose-cli includeFiles: "*.yaml" + # Model CLI + - path: github.com/docker/model-cli + mounts: + - source: docs/reference + target: data/model-cli + includeFiles: "*.yaml" + # Scout CLI plugin (public dist repo) - path: github.com/docker/scout-cli mounts: diff --git a/hugo_stats.json b/hugo_stats.json index 057958e87d5c..861a159230b6 100644 --- a/hugo_stats.json +++ b/hugo_stats.json @@ -2,18 +2,17 @@ "htmlElements": { "tags": null, "classes": [ - "!mt-0", "--mount", "--tmpfs", - "-mb-3", - "-mr-8", + "-mr-20", "-mt-0.5", - "-mt-4", "-mt-8", + "-top-10", "-top-16", "-v", "-z-10", ".NET", + "AWS-Route-53", "Admin-Console", "After", "Angular", @@ -25,8 +24,6 @@ "Bake", "Bash", "Before", - "BitBucket-Pipelines", - "Buildkite", "CLI", "CentOS-RHEL-and-Fedora", "Circle-CI", @@ -35,30 +32,40 @@ "Command-Prompt-CLI", "Compliant", "Custom-builder", + "DNS-resolution", "Debian", "Debian-GNU/Linux", "Diff", - "Docker-Compose", + "Docker-Build-Cloud", "Docker-Desktop", + "Docker-Engine", "Docker-Hub", + "Docker-Scout", "Docker-Scout-Dashboard", - "Docker-plan", + "Docker-subscription", "Download", + "Enable-for-a-given-project", + "Enable-globally", "Entra-ID", + "Entra-ID-OIDC", "Entra-ID-SAML-2.0", + 
"Entra-ID/Azure-AD-OIDC-and-SAML-2.0", + "Entra-ID/Azure-AD-SAML-2.0-and-OIDC", "External-cloud-storage", "Fedora", "For-Mac-with-Apple-silicon", "For-Mac-with-Intel-chip", - "For-all-platforms", + "From-Docker-Desktop", + "From-the-Docker-CLI", "From-the-GUI", "From-the-command-line", "GUI", "Git-Bash", "Git-Bash-CLI", - "GitHub-Actions", "GitLab", "Go", + "GoDaddy", + "Google-Cloud-DNS", "HTTP", "Heredocs", "Hyper-V-backend-x86_64", @@ -68,8 +75,8 @@ "JavaScript", "Jenkins", "Latest", - "Legacy-Docker-plan", "Legacy-Docker-plans", + "Legacy-Docker-subscription", "Linux", "Local-or-Hub-storage", "MDM", @@ -81,13 +88,18 @@ "Mac-and-Linux", "Mac-with-Apple-silicon", "Mac-with-Intel-chip", + "MacOS", "Manually-create-assets", "NetworkManager", + "Networking-mode", "Node", "Non-compliant", "Okta", "Okta-SAML", "Old-Dockerfile", + "On-Unix-environments", + "On-Windows", + "Other-providers", "PHP", "PowerShell", "PowerShell-CLI", @@ -95,6 +107,7 @@ "RHEL-8", "RHEL-9", "RHEL-CentOS-or-Fedora", + "RPM-base-distributions", "Raw", "React", "Regular-install", @@ -104,12 +117,14 @@ "Run-Ollama-in-a-container", "Run-Ollama-outside-of-a-container", "Rust", - "Shell", + "Separate-containers", "Shell-script", + "Single-container", "Specific-version", "Svelte", - "Travis-CI", + "Testcontainers-Cloud", "Ubuntu", + "Ubuntu/Debian", "Unix-pipe", "Updated-Dockerfile", "Use-Docker-Init", @@ -117,12 +132,8 @@ "Using-the-CLI", "Using-the-GUI", "VS-Code", - "Version-4.15-4.17", - "Version-4.17-and-earlier", - "Version-4.18-and-later", - "Versions-prior-to-4.15", "Vue", - "WSL-2-backend-Arm-Beta", + "WSL-2-backend-Arm-Early-Access", "WSL-2-backend-x86_64", "Web-browser", "What-are-the-key-features-of-Docker-Desktop", @@ -136,124 +147,142 @@ "With-systemd-Highly-recommended", "Without-packages", "Without-systemd", + "[display:none]", "absolute", + "admonition", + "admonition-content", + "admonition-danger", + "admonition-header", + "admonition-icon", + "admonition-note", + "admonition-tip", + "admonition-title", + "admonition-warning", "aspect-video", "bake-action", - "bg-amber-light", + "bg-amber-500", "bg-background-light", + "bg-background-toc", + "bg-black/100", "bg-black/50", - "bg-black/70", - "bg-blue-light", - "bg-blue-light-400", - "bg-blue-light-500", - "bg-cover", + "bg-blue", + "bg-blue-400", + "bg-blue-500", "bg-gradient-to-br", "bg-gradient-to-r", - "bg-gradient-to-t", - "bg-gray-light-100", - "bg-gray-light-200", - "bg-gray-light-400", - "bg-gray-light-700", - "bg-green-light", - "bg-green-light-400", + "bg-gray-100", + "bg-gray-400", + "bg-gray-50", + "bg-gray-700", + "bg-green-400", + "bg-green-500", + "bg-navbar-bg", "bg-pattern-blue", "bg-pattern-purple", "bg-pattern-verde", - "bg-red-light", + "bg-red-500", "bg-transparent", - "bg-violet-light", + "bg-violet-500", "bg-white", "bg-white/10", "block", "border", "border-0", - "border-amber-light", "border-b", "border-b-4", - "border-blue-light", - "border-blue-light-500", + "border-blue", "border-divider-light", - "border-gray-light-100", - "border-gray-light-200", - "border-gray-light-400", - "border-green-light", - "border-green-light-400", + "border-gray-200", + "border-gray-300", + "border-gray-400", + "border-green-400", "border-l-2", - "border-l-4", "border-l-magenta-light", - "border-red-light", "border-transparent", - "border-violet-light", "border-white", "bottom-0", + "breadcrumbs", "build-push-action", + "button", + "card", + "card-content", + "card-description", + "card-header", + "card-icon", + "card-img", + "card-link", + 
"card-title", + "chip", "chroma", + "cls-1", + "cls-2", "col-start-2", "containerd-image-store", "cursor-pointer", - "dark:bg-amber-dark", + "dark:bg-amber-400", "dark:bg-background-dark", - "dark:bg-blue-dark", - "dark:bg-blue-dark-400", - "dark:bg-gray-dark-100", - "dark:bg-gray-dark-200", - "dark:bg-gray-dark-300", - "dark:bg-gray-dark-400", - "dark:bg-green-dark", + "dark:bg-background-toc", + "dark:bg-blue", + "dark:bg-blue-400", + "dark:bg-blue-500", + "dark:bg-blue-800", + "dark:bg-gray-300", + "dark:bg-gray-500", + "dark:bg-gray-800", + "dark:bg-gray-900", + "dark:bg-gray-950", + "dark:bg-green-700", "dark:bg-green-dark-400", - "dark:bg-red-dark", - "dark:bg-violet-dark", + "dark:bg-navbar-bg-dark", + "dark:bg-red-400", + "dark:bg-violet-400", "dark:block", - "dark:border-amber-dark", - "dark:border-b-blue-dark-600", - "dark:border-blue-dark", + "dark:border-b-blue-600", "dark:border-divider-dark", - "dark:border-gray-dark-200", - "dark:border-gray-dark-400", - "dark:border-green-dark", - "dark:border-green-dark-400", + "dark:border-gray-100", + "dark:border-gray-400", + "dark:border-gray-50", + "dark:border-gray-700", + "dark:border-green-400", "dark:border-l-magenta-dark", - "dark:border-red-dark", - "dark:border-violet-dark", - "dark:fill-blue-dark", - "dark:focus:ring-blue-dark", - "dark:from-background-dark", - "dark:from-blue-dark-200", - "dark:from-blue-dark-400", - "dark:from-gray-dark-100", + "dark:fill-blue-300", + "dark:focus:ring-3-blue-dark", + "dark:from-blue-300", + "dark:from-blue-600", "dark:hidden", - "dark:hover:bg-blue-dark", - "dark:hover:bg-blue-dark-500", - "dark:hover:bg-gray-dark-200", - "dark:hover:bg-gray-dark-500", - "dark:hover:text-blue-dark", + "dark:hover:bg-blue-400", + "dark:hover:bg-blue-500", + "dark:hover:bg-blue-700", + "dark:hover:bg-gray-600", + "dark:hover:bg-gray-800", + "dark:hover:bg-gray-900", + "dark:hover:text-blue", + "dark:outline-gray-800", "dark:prose-invert", - "dark:ring-blue-dark-400", - "dark:ring-gray-dark-400", + "dark:ring-3-blue-dark-400", + "dark:ring-3-gray-dark-400", "dark:syntax-dark", - "dark:text-amber-dark", - "dark:text-blue-dark", + "dark:text-blue", + "dark:text-blue-700", "dark:text-divider-dark", - "dark:text-gray-dark", - "dark:text-gray-dark-300", - "dark:text-gray-dark-500", - "dark:text-gray-dark-600", - "dark:text-gray-dark-700", - "dark:text-gray-dark-800", - "dark:text-green-dark", + "dark:text-gray", + "dark:text-gray-100", + "dark:text-gray-200", + "dark:text-gray-300", + "dark:text-gray-400", + "dark:text-gray-500", "dark:text-magenta-dark", - "dark:text-red-dark", - "dark:text-violet-dark", "dark:text-white", - "dark:to-background-dark", - "dark:to-blue-dark-100", - "dark:to-magenta-dark-400", + "dark:to-blue-400", + "dark:to-blue-500", "docker/bake-action", "docker/build-push-action", + "download-links", + "download-links-subcontainer", "drop-shadow", - "drop-shadow-sm", + "dropdown-base", "duration-300", - "fill-blue-light", + "fill-blue", "fixed", "flex", "flex-1", @@ -261,38 +290,41 @@ "flex-col", "flex-col-reverse", "flex-grow", - "flex-grow-0", "flex-none", "flex-shrink", "flex-wrap", - "focus:ring-blue-light", + "focus:ring-3-blue-light", + "font-bold", "font-medium", + "font-normal", "font-semibold", "footnote-backref", "footnote-ref", "footnotes", - "from-20%", - "from-background-light", - "from-blue-light-400", - "from-blue-light-600", + "from-blue-400", + "from-blue-600", + "gap-0", "gap-1", "gap-10", "gap-12", "gap-2", + "gap-2.5", "gap-20", "gap-3", "gap-4", - "gap-6", "gap-8", "goat", 
"grid", "grid-cols-1", "group", "group-hover:block'", + "group-open:[display:block]", + "group-open:rotate-180", "h-16", "h-2", "h-32", "h-48", + "h-5", "h-6", "h-8", "h-[calc(100vh-64px)]", @@ -303,26 +335,29 @@ "hidden", "hidden'", "highlight", - "hover:bg-blue-light-400", - "hover:bg-gray-light-100", - "hover:bg-gray-light-200", - "hover:bg-gray-light-300", + "hover:bg-blue", + "hover:bg-blue-400", + "hover:bg-blue-500", + "hover:bg-gray-100", + "hover:bg-gray-200", + "hover:bg-gray-300", + "hover:bg-gray-50", "hover:bg-white/20", - "hover:border-gray-light-200", "hover:border-white/20", - "hover:dark:bg-gray-dark-200", - "hover:dark:bg-gray-dark-300", - "hover:dark:border-gray-dark", - "hover:dark:text-blue-dark", - "hover:drop-shadow-lg", + "hover:dark:bg-blue-500", + "hover:dark:bg-gray-300", + "hover:dark:bg-gray-800", + "hover:dark:text-blue-400", + "hover:dark:text-blue-700", "hover:opacity-90", - "hover:text-blue-light", + "hover:text-blue", "hover:text-white", "hover:underline", "icon-lg", "icon-sm", "icon-svg", "inline", + "inline-block", "inline-flex", "inset-0", "invertible", @@ -334,7 +369,9 @@ "justify-center", "justify-end", "justify-evenly", + "leading-none", "leading-snug", + "leading-tight", "left-0", "lg:block", "lg:flex", @@ -352,21 +389,22 @@ "lntable", "lntd", "m-2", - "m-4", "macOS", "max-h-full", "max-w-4xl", "max-w-56", "max-w-[1920px]", "max-w-[840px]", - "max-w-fit", "max-w-full", "max-w-none", "max-w-xl", "mb-1", + "mb-1.5", "mb-2", "mb-4", + "mb-6", "mb-8", + "md-dropdown", "md:block", "md:flex-nowrap", "md:flex-row", @@ -381,22 +419,33 @@ "md:text-sm", "md:top-16", "md:w-[300px]", + "md:w-[320px]", "md:z-auto", "min-h-screen", "min-w-0", + "min-w-48", "min-w-52", "min-w-fit", "ml-2", "ml-3", "ml-4", + "ml-auto", "mt-1", + "mt-1.5", "mt-2", "mt-20", "mt-4", + "mt-8", + "mt-[2px]", "mx-auto", "my-0", "my-4", "my-6", + "navbar-entry-background-current", + "navbar-entry-margin", + "navbar-font", + "navbar-group", + "navbar-group-font-title", "no-underline", "no-wrap", "not-prose", @@ -404,15 +453,20 @@ "open-kapa-widget", "openSUSE-and-SLES", "origin-bottom-right", + "origin-top-right", "ot-sdk-show-settings", - "outline-none", + "outline", + "outline-1", + "outline-gray-200", + "outline-hidden", + "outline-offset-[-1px]", "overflow-clip", "overflow-hidden", "overflow-x-auto", "overflow-x-hidden", "overflow-y-auto", - "p-1", "p-2", + "p-3", "p-4", "p-6", "p-8", @@ -421,9 +475,9 @@ "pb-0.5", "pb-1", "pb-2", + "pb-20", "pb-4", "pb-8", - "pl-1", "pl-2", "pl-3", "pl-4", @@ -432,12 +486,14 @@ "placeholder:text-white", "pr-2", "prose", + "pt-10", "pt-2", "pt-4", "px-1", "px-2", "px-4", "px-6", + "py-0.5", "py-1", "py-2", "py-20", @@ -445,83 +501,88 @@ "py-8", "relative", "right-0", - "right-3", + "right-2", "right-8", - "ring-2", - "ring-[1.5px]", - "ring-blue-light-400", - "ring-gray-light-200", + "ring-3-2", + "ring-3-[1.5px]", + "ring-3-blue-light-400", + "ring-3-gray-light-200", "rotate-45", "rounded", - "rounded-[6px]", - "rounded-b-lg", "rounded-full", "rounded-sm", "scale-50", "scale-75", + "scroll-mt-2", "scroll-mt-20", "scroll-mt-36", + "section-card", + "section-card-text", + "section-card-title", "select-none", "self-center", "self-start", "shadow", "shadow-lg", + "shadow-md", "sm:block", "sm:flex", "sm:flex-row", "sm:hidden", "sm:items-center", "sm:w-full", - "space-x-2", "space-y-2", "space-y-4", "sticky", + "sub-button", + "summary-bar", + "svg", "svg-container", "syntax-light", "systemd-networkd", + "tab-item", + "tablist", + "tabs", "text-2xl", - 
"text-amber-light", "text-base", "text-black", + "text-blue", "text-blue-light", "text-divider-light", - "text-gray-light", - "text-gray-light-200", - "text-gray-light-300", - "text-gray-light-500", - "text-gray-light-600", - "text-gray-light-800", - "text-green-light", + "text-gray", + "text-gray-200", + "text-gray-300", + "text-gray-400", + "text-gray-500", + "text-gray-600", + "text-gray-800", "text-left", "text-lg", "text-magenta-light", - "text-red-light", "text-sm", - "text-violet-light", "text-white", "text-xl", "text-xs", - "to-30%", "to-50%", - "to-75%", - "to-blue-light", - "to-magenta-light-400", - "to-transparent", - "to-white", + "to-blue-200", + "to-blue-500", "toc", "top-0", + "top-1", "top-16", - "top-3", "top-6", "top-full", "transition", + "transition-colors", + "transition-transform", "truncate", "underline-offset-2", - "uppercase", "w-2", + "w-5", + "w-56", + "w-65", "w-8", "w-[1200px]", - "w-[32px]", "w-fit", "w-full", "w-screen", diff --git a/i18n/en.yaml b/i18n/en.yaml index 32d25ee84ff0..bbe778e7d11c 100644 --- a/i18n/en.yaml +++ b/i18n/en.yaml @@ -34,21 +34,4 @@ apiPropValue: apiPropReq: other: Required apiPropDesc: - other: Description - -## component names - -buildx: - other: Buildx -buildkit: - other: BuildKit -engine: - other: Docker Engine -api: - other: Engine API -desktop: - other: Docker Desktop -compose: - other: Docker Compose -scout: - other: Docker Scout + other: Description \ No newline at end of file diff --git a/layouts/_default/_markup/render-blockquote.html b/layouts/_default/_markup/render-blockquote.html index e82786d5e5f2..3d3a6e60cea6 100644 --- a/layouts/_default/_markup/render-blockquote.html +++ b/layouts/_default/_markup/render-blockquote.html @@ -1,41 +1,42 @@ {{- $icons := dict - "caution" "dangerous" - "important" "report" - "note" "info" - "tip" "lightbulb" - "warning" "warning" + "caution" "warning.svg" + "important" "important.svg" + "note" "info.svg" + "tip" "lightbulb.svg" + "warning" "warning.svg" }} -{{- $borders := dict - "caution" "border-red-light dark:border-red-dark" - "important" "border-violet-light dark:border-violet-dark" - "note" "border-blue-light dark:border-blue-dark" - "tip" "border-green-light dark:border-green-dark" - "warning" "border-amber-light dark:border-amber-dark" -}} -{{- $textColors := dict - "caution" "text-red-light dark:text-red-dark" - "important" "text-violet-light dark:text-violet-dark" - "note" "text-blue-light dark:text-blue-dark" - "tip" "text-green-light dark:text-green-dark" - "warning" "text-amber-light dark:text-amber-dark" +{{- $admonitionClasses := dict + "caution" "admonition admonition-danger" + "important" "admonition admonition-note" + "note" "admonition admonition-note" + "tip" "admonition admonition-tip" + "warning" "admonition admonition-warning" }} +{{- $type := cond (index $icons .AlertType) .AlertType "note" }} +{{- $iconFile := index $icons $type }} +{{- $partial := printf "admonitions/icons/%s" $iconFile }} + {{ if eq .Type "alert" }}
-

- {{ $i := index $icons .AlertType }}
- {{ partialCached "icon.html" $i $i }}
+ class="{{ index $admonitionClasses .AlertType }} admonition not-prose">
+

+
+ {{- partialCached $partial . }}
+
+
+ {{ printf "%s%s" (upper (substr $.AlertType 0 1)) (substr $.AlertType 1) }}
- {{ i18n .AlertType }}
-

- {{ .Text | safeHTML }}
+
+
+ {{ .Text | safeHTML }}
+
 {{ else }}
- {{ .Text | safeHTML }}
+ class="admonition not-prose">
+ {{ .Text | safeHTML }}
 {{ end }}
diff --git a/layouts/_default/_markup/render-codeblock.html b/layouts/_default/_markup/render-codeblock.html
index ce6e243cb48e..c0a0addb6dc9 100644
--- a/layouts/_default/_markup/render-codeblock.html
+++ b/layouts/_default/_markup/render-codeblock.html
@@ -1,38 +1,78 @@
-
+
 {{ with .Attributes.title }}
- {{ . }}
+
+
+
+ {{ . }}
+
+
 {{ end }}
-
-
- {{ $lang := .Type | default "text" }} {{ $result := transform.Highlight .Inner
- $lang .Options }}
-
+ setTimeout(() => copying = false, 2000);"
+ >
+
+
+
 {{ with .Attributes.collapse }}
-
-
-
+
+
+
+
+
+ {{ $result }}
+
-
-
- {{ $result }}
-
-
-
 {{ else }}
 {{ $result }}
 {{ end }}
diff --git a/layouts/_default/_markup/render-image.html b/layouts/_default/_markup/render-image.html
index 7fd16e10d12c..92b76d42c9e0 100644
--- a/layouts/_default/_markup/render-image.html
+++ b/layouts/_default/_markup/render-image.html
@@ -14,6 +14,7 @@
 {{ $height := $params.Get "h" }}
 {{ $border := $params.Has "border" }}
+
 {{ with .Title }}
- {{ . }}
+ {{ . }}
 {{ end }}