diff --git a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
new file mode 100644
index 000000000000..0c8c15a05eaf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
@@ -0,0 +1,36 @@
+---
+name: Project Antalya Bug Report
+about: Help us improve Project Antalya
+title: ''
+labels: antalya
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Key information**
+Provide relevant runtime details.
+ - Project Antalya Build Version
+ - Cloud provider, e.g., AWS
+ - Kubernetes provider, e.g., GKE or Minikube
+ - Object storage, e.g., AWS S3 or Minio
+ - Iceberg catalog, e.g., Glue with REST Proxy
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml
deleted file mode 100644
index 71a3d3da6425..000000000000
--- a/.github/ISSUE_TEMPLATE/10_question.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Question
-description: Ask a question about ClickHouse
-labels: ["question"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Question
- description: Please put your question here.
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml
deleted file mode 100644
index 054efc2d61ee..000000000000
--- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Feature request
-description: Suggest an idea for ClickHouse
-labels: ["feature"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Use case
- description: A clear and concise description of what the intended usage scenario is.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the solution you'd like
- description: A clear and concise description of what you want to happen.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe alternatives you've considered
- description: A clear and concise description of any alternative solutions or features you've considered.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context or screenshots about the feature request here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
new file mode 100644
index 000000000000..603584bf4428
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
@@ -0,0 +1,20 @@
+---
+name: Project Antalya Feature request
+about: Suggest an idea for Project Antalya
+title: ''
+labels: antalya, enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
new file mode 100644
index 000000000000..c77cee4a916b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
@@ -0,0 +1,16 @@
+---
+name: Project Antalya Question
+about: Ask a question about Project Antalya
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and probably has a short answer, asking it in [the Altinity Slack workspace](https://altinity.com/slack) is probably the fastest way to find the answer. Use the #antalya channel.
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
+
+Please include relevant environment information as applicable.
diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
deleted file mode 100644
index 3cb55a960d18..000000000000
--- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Unexpected behaviour
-description: Some feature is working in non-obvious way
-labels: ["unexpected behaviour"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
deleted file mode 100644
index 68ab7129a873..000000000000
--- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Incomplete implementation
-description: Implementation of existing feature is not finished
-labels: ["unfinished code"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
new file mode 100644
index 000000000000..90bf241dc195
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
@@ -0,0 +1,50 @@
+---
+name: Altinity Stable Bug report
+about: Report something broken in an Altinity Stable Build
+title: ''
+labels: stable
+assignees: ''
+
+---
+
+✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.*
+
+## Type of problem
+Choose one of the following items, then delete the others:
+
+**Bug report** - something's broken
+
+**Incomplete implementation** - something's not quite right
+
+**Performance issue** - something works, just not as quickly as it should
+
+**Backwards compatibility issue** - something used to work, but now it doesn't
+
+**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise
+
+**Installation issue** - something doesn't install the way it should
+
+**Usability issue** - something works, but it could be a lot easier
+
+**Documentation issue** - something in the docs is wrong, incomplete, or confusing
+
+## Describe the situation
+A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version?
+
+## How to reproduce the behavior
+
+* Which Altinity Stable Build version to use
+* Which interface to use, if it matters
+* Non-default settings, if any
+* `CREATE TABLE` statements for all tables involved
+* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary
+* Queries to run that lead to an unexpected result
+
+## Expected behavior
+A clear, concise description of what you expected to happen.
+
+## Logs, error messages, stacktraces, screenshots...
+Add any details that might explain the issue.
+
+## Additional context
+Add any other context about the issue here.
diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
deleted file mode 100644
index 96543a7af6c9..000000000000
--- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Usability issue
-description: Report something can be made more convenient to use
-labels: ["usability"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
new file mode 100644
index 000000000000..027970e25a02
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
@@ -0,0 +1,16 @@
+---
+name: Altinity Stable Question
+about: Ask a question about an Altinity Stable Build
+title: ''
+labels: question, stable
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and probably has a short answer, asking it in [the Altinity Slack channel](https://altinity.com/slack) is probably the fastest way to find the answer.
+
+For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse).
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml
deleted file mode 100644
index a96f538bcc89..000000000000
--- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Build issue
-description: Report failed ClickHouse build from master
-labels: ["build"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Operating system
- description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too.
- validations:
- required: true
- - type: textarea
- attributes:
- label: CMake version
- description: The output of `cmake --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Ninja version
- description: The output of `ninja --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Compiler name and version
- description: We recommend to use clang. The version can be obtained via `clang --version`.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Full cmake and/or ninja output
- description: Please include everything!
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
deleted file mode 100644
index bba6df87a783..000000000000
--- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Documentation issue
-description: Report something incorrect or missing in documentation
-labels: ["comp-documentation"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the issue
- description: A clear and concise description of what's wrong in documentation.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
deleted file mode 100644
index 281d51c73b51..000000000000
--- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Performance issue
-description: Report something working slower than expected
-labels: ["performance"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the situation
- description: What exactly works slower than expected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected performance
- description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system?
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
deleted file mode 100644
index 32786cd87dca..000000000000
--- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Backward compatibility issue
-description: Report the case when the behaviour of a new version can break existing use cases
-labels: ["backward compatibility"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml
deleted file mode 100644
index 5344fbcda255..000000000000
--- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-name: Bug report
-description: Wrong behavior (visible to users) in the official ClickHouse release.
-labels: ["potential bug"]
-body:
- - type: markdown
- attributes:
- value: |
- > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)).
- > You have to provide the following information whenever possible.
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe what's wrong
- description: |
- * A clear and concise description of what works not as it is supposed to.
- * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).
- validations:
- required: true
- - type: dropdown
- attributes:
- label: Does it reproduce on the most recent release?
- description: |
- [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
- options:
- - 'Yes'
- - 'No'
- validations:
- required: true
- - type: markdown
- attributes:
- value: |
- -----
- > Change "enabled" to true in "send_crash_reports" section in `config.xml`:
- ```xml
-
-
-
- false
-
- ```
- -----
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/master/programs/obfuscator/Obfuscator.cpp#L42-L80) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
deleted file mode 100644
index 84dc8a372e5a..000000000000
--- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Assertion found via fuzzing
-description: Potential issue has been found via Fuzzer or Stress tests
-labels: ["fuzz"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
deleted file mode 100644
index 7bb47e2b824b..000000000000
--- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Sanitizer alert
-description: Potential issue has been found by special code instrumentation
-labels: ["testing"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
deleted file mode 100644
index f71f6079453e..000000000000
--- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: Installation issue
-description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/
-labels: ["comp-install"]
-body:
- - type: markdown
- attributes:
- value: |
- > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Installation type
- description: Packages, docker, single binary, curl?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Source of the ClickHouse
- description: A link to the source. Or the command you've tried.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the problem.
- description: What went wrong and what is the expected result?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * For Linux-based operating systems: provide a script for clear docker container from the official image
- * For anything else: steps to reproduce on as much as possible clear system
- validations:
- required: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 7a933bb4d857..d15e5a8b50e2 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -23,19 +23,4 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
### Documentation entry for user-facing changes
-- [ ] Documentation is written (mandatory for new features)
-
diff --git a/.github/actionlint.yml b/.github/actionlint.yml
index cf5f575e3c74..904a548dadd5 100644
--- a/.github/actionlint.yml
+++ b/.github/actionlint.yml
@@ -1,9 +1,9 @@
self-hosted-runner:
labels:
- - builder
- - func-tester
- - func-tester-aarch64
+ - altinity-builder
+ - altinity-func-tester
+ - altinity-func-tester-aarch64
- fuzzer-unit-tester
- - style-checker
- - style-checker-aarch64
+ - altinity-style-checker
+ - altinity-style-checker-aarch64
- release-maker
diff --git a/.github/actions/common_setup/action.yml b/.github/actions/common_setup/action.yml
index e492fa97816d..87db4d9fa503 100644
--- a/.github/actions/common_setup/action.yml
+++ b/.github/actions/common_setup/action.yml
@@ -28,3 +28,20 @@ runs:
run: |
# to remove every leftovers
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
+ - name: Setup zram
+ shell: bash
+ run: |
+ # Check if zram is already set up
+ if ! lsmod | grep -q "^zram "; then
+ sudo modprobe zram
+ fi
+
+ # Only proceed with setup if /dev/zram0 is not already in use
+ if ! swapon -s | grep -q "/dev/zram0"; then
+ MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB
+ Percent=200
+ ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB
+ .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0
+ sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0
+ sudo sysctl vm.swappiness=200
+ fi
diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml
new file mode 100644
index 000000000000..dbca19c28e37
--- /dev/null
+++ b/.github/actions/create_workflow_report/action.yml
@@ -0,0 +1,40 @@
+name: Create and Upload Combined Report
+description: Create and upload a combined CI report
+inputs:
+ final:
+ description: "Control whether the report is final or a preview"
+ required: false
+ default: "false"
+runs:
+ using: "composite"
+ steps:
+ - name: Create and upload workflow report
+ env:
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }}
+ FINAL: ${{ inputs.final }}
+ shell: bash
+ run: |
+ pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5
+
+ CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py"
+ ARGS="--commit-sha $COMMIT_SHA --actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.json --cves"
+
+ set +e -x
+ if [[ "$FINAL" == "false" ]]; then
+ REPORT_LINK=$($CMD $ARGS --mark-preview)
+ else
+ REPORT_LINK=$($CMD $ARGS)
+ fi
+
+ echo $REPORT_LINK
+
+ if [[ "$FINAL" == "true" ]]; then
+ IS_VALID_URL=$(echo $REPORT_LINK | grep -E '^https?://')
+ if [[ -n $IS_VALID_URL ]]; then
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "Error: $REPORT_LINK" >> $GITHUB_STEP_SUMMARY
+ exit 1
+ fi
+ fi
diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja
new file mode 100644
index 000000000000..a92c1aa34e3a
--- /dev/null
+++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja
@@ -0,0 +1,269 @@
+
+
+
+
+
+
+
+
+ {{ title }}
+
+
+
+
+
+
+ {{ title }}
+
+ {% if is_preview %}
+ This is a preview. The workflow is not yet finished.
+ {% endif %}
+ Table of Contents
+
+
+ {%- if pr_number != 0 -%}
+ New Fails in PR
+ Compared with base sha {{ base_sha }}
+ {{ new_fails_html }}
+ {%- endif %}
+
+ CI Jobs Status
+ {{ ci_jobs_status_html }}
+
+ Checks Errors
+ {{ checks_errors_html }}
+
+ Checks New Fails
+ {{ checks_fails_html }}
+
+ Regression New Fails
+ {{ regression_fails_html }}
+
+ Docker Images CVEs
+ {{ docker_images_cves_html }}
+
+ Checks Known Fails
+
+ Fail reason conventions:
+ KNOWN - Accepted fail and fix is not planned
+ INVESTIGATE - We don't know why it fails
+ NEEDSFIX - Investigation done and a fix is needed to make it pass
+
+ {{ checks_known_fails_html }}
+
+
+
+
\ No newline at end of file
diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py
new file mode 100755
index 000000000000..741272685678
--- /dev/null
+++ b/.github/actions/create_workflow_report/create_workflow_report.py
@@ -0,0 +1,664 @@
+#!/usr/bin/env python3
+import argparse
+import os
+from pathlib import Path
+from itertools import combinations
+import json
+from datetime import datetime
+from functools import lru_cache
+
+import pandas as pd
+from jinja2 import Environment, FileSystemLoader
+import requests
+from clickhouse_driver import Client
+import boto3
+from botocore.exceptions import NoCredentialsError
+
+DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST"
+DATABASE_USER_VAR = "CHECKS_DATABASE_USER"
+DATABASE_PASSWORD_VAR = "CHECKS_DATABASE_PASSWORD"
+S3_BUCKET = "altinity-build-artifacts"
+GITHUB_REPO = "Altinity/ClickHouse"
+
+# Set up the Jinja2 environment
+template_dir = os.path.dirname(__file__)
+
+# Load the template
+template = Environment(loader=FileSystemLoader(template_dir)).get_template(
+ "ci_run_report.html.jinja"
+)
+
+
+def get_commit_statuses(sha: str) -> pd.DataFrame:
+ """
+ Fetch commit statuses for a given SHA and return as a pandas DataFrame.
+ Handles pagination to get all statuses.
+
+ Args:
+ sha (str): Commit SHA to fetch statuses for.
+
+ Returns:
+ pd.DataFrame: DataFrame containing all statuses.
+ """
+ headers = {
+ "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+ "Accept": "application/vnd.github.v3+json",
+ }
+
+ url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses"
+
+ all_data = []
+
+ while url:
+ response = requests.get(url, headers=headers)
+
+ if response.status_code != 200:
+ raise Exception(
+ f"Failed to fetch statuses: {response.status_code} {response.text}"
+ )
+
+ data = response.json()
+ all_data.extend(data)
+
+ # Check for pagination links in the response headers
+ if "Link" in response.headers:
+ links = response.headers["Link"].split(",")
+ next_url = None
+
+ for link in links:
+ parts = link.strip().split(";")
+ if len(parts) == 2 and 'rel="next"' in parts[1]:
+ next_url = parts[0].strip("<>")
+ break
+
+ url = next_url
+ else:
+ url = None
+
+ # Parse relevant fields
+ parsed = [
+ {
+ "job_name": item["context"],
+ "job_status": item["state"],
+ "message": item["description"],
+ "results_link": item["target_url"],
+ }
+ for item in all_data
+ ]
+
+ # Create DataFrame
+ df = pd.DataFrame(parsed)
+
+ # Drop duplicates keeping the first occurrence (newest status for each context)
+ # GitHub returns statuses in reverse chronological order
+ df = df.drop_duplicates(subset=["job_name"], keep="first")
+
+ # Sort by status and job name
+ return df.sort_values(
+ by=["job_status", "job_name"], ascending=[True, True]
+ ).reset_index(drop=True)
+
+
+def get_pr_info_from_number(pr_number: str) -> dict:
+    """
+    Fetch pull request information for a given PR number.
+
+    Authenticates with the GITHUB_TOKEN environment variable.
+
+    Args:
+        pr_number (str): Pull request number to fetch information for.
+
+    Returns:
+        dict: Dictionary containing PR information (GitHub pulls API object).
+
+    Raises:
+        Exception: If the GitHub API does not return HTTP 200.
+    """
+    headers = {
+        "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+        "Accept": "application/vnd.github.v3+json",
+    }
+
+    url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}"
+    response = requests.get(url, headers=headers)
+
+    if response.status_code != 200:
+        raise Exception(
+            f"Failed to fetch pull request info: {response.status_code} {response.text}"
+        )
+
+    return response.json()
+
+
+@lru_cache
+def get_run_details(run_url: str) -> dict:
+    """
+    Fetch run details for a given run URL.
+
+    The run id is taken from the last path segment of run_url.  @lru_cache
+    memoizes the result per URL for the process lifetime, so repeated calls
+    (e.g. from main()) hit the GitHub API only once.
+
+    Raises:
+        Exception: If the GitHub API does not return HTTP 200.
+    """
+    run_id = run_url.split("/")[-1]
+
+    headers = {
+        "Authorization": f"token {os.getenv('GITHUB_TOKEN')}",
+        "Accept": "application/vnd.github.v3+json",
+    }
+
+    url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}"
+    response = requests.get(url, headers=headers)
+
+    if response.status_code != 200:
+        raise Exception(
+            f"Failed to fetch run details: {response.status_code} {response.text}"
+        )
+
+    return response.json()
+
+
+def get_checks_fails(client: Client, commit_sha: str, branch_name: str):
+    """
+    Get tests that did not succeed for the given commit and branch.
+    Exclude checks that have status 'error' as they are counted in get_checks_errors.
+
+    NOTE(review): branch_name is not referenced by the query; it appears to be
+    kept for signature symmetry with the other fetchers — confirm before removing.
+    commit_sha is interpolated into the SQL directly; it comes from CI, not end
+    users, but parameter binding would still be safer.
+    """
+    # Inner SELECT picks the latest status per (check, test) via argMax;
+    # outer WHERE keeps only failing tests from non-errored jobs.
+    query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+    FROM (
+        SELECT
+            argMax(check_status, check_start_time) as job_status,
+            check_name as job_name,
+            argMax(test_status, check_start_time) as status,
+            test_name,
+            report_url as results_link,
+            task_url
+        FROM `gh-data`.checks
+        WHERE commit_sha='{commit_sha}'
+        GROUP BY check_name, test_name, report_url, task_url
+        )
+    WHERE test_status IN ('FAIL', 'ERROR')
+    AND job_status!='error'
+    ORDER BY job_name, test_name
+    """
+    return client.query_dataframe(query)
+
+
+def get_checks_known_fails(
+    client: Client, commit_sha: str, branch_name: str, known_fails: dict
+):
+    """
+    Get tests that are known to fail for the given commit and branch.
+
+    Only tests with status BROKEN whose names appear in known_fails are
+    returned; a "reason" column (from the known_fails entries) is inserted
+    before the results_link column.
+
+    NOTE(review): test names are inlined into the IN (...) list unescaped —
+    assumes names contain no single quotes.  branch_name is unused by the
+    query; confirm before removing.
+    """
+    if len(known_fails) == 0:
+        return pd.DataFrame()
+
+    query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+    FROM (
+        SELECT
+            argMax(check_status, check_start_time) as job_status,
+            check_name as job_name,
+            argMax(test_status, check_start_time) as status,
+            test_name,
+            report_url as results_link,
+            task_url
+        FROM `gh-data`.checks
+        WHERE commit_sha='{commit_sha}'
+        GROUP BY check_name, test_name, report_url, task_url
+        )
+    WHERE test_status='BROKEN'
+    AND test_name IN ({','.join(f"'{test}'" for test in known_fails.keys())})
+    ORDER BY job_name, test_name
+    """
+
+    df = client.query_dataframe(query)
+
+    # Insert "reason" as the second-to-last column (just before results_link).
+    df.insert(
+        len(df.columns) - 1,
+        "reason",
+        df["test_name"]
+        .astype(str)
+        .apply(
+            lambda test_name: known_fails[test_name].get("reason", "No reason given")
+        ),
+    )
+
+    return df
+
+
+def get_checks_errors(client: Client, commit_sha: str, branch_name: str):
+    """
+    Get checks that have status 'error' for the given commit and branch.
+
+    Complements get_checks_fails, which excludes errored jobs.
+    NOTE(review): branch_name is not referenced by the query — confirm before
+    removing.  `==` is valid ClickHouse SQL equality (same as `=`).
+    """
+    query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+    FROM (
+        SELECT
+            argMax(check_status, check_start_time) as job_status,
+            check_name as job_name,
+            argMax(test_status, check_start_time) as status,
+            test_name,
+            report_url as results_link,
+            task_url
+        FROM `gh-data`.checks
+        WHERE commit_sha='{commit_sha}'
+        GROUP BY check_name, test_name, report_url, task_url
+        )
+    WHERE job_status=='error'
+    ORDER BY job_name, test_name
+    """
+    return client.query_dataframe(query)
+
+
+def drop_prefix_rows(df, column_to_clean):
+    """
+    Drop rows from the dataframe if:
+    - the row matches another row completely except for the specified column
+    - the specified column of that row is a prefix of the same column in another row
+
+    Pairwise comparison is O(n^2) in the number of rows, which is fine for the
+    small failure tables this is applied to.
+    NOTE(review): rows are dropped by index label, so this assumes unique index
+    labels (true for freshly-built query results) — confirm if reused elsewhere.
+    """
+    to_drop = set()
+    reference_columns = [col for col in df.columns if col != column_to_clean]
+    for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2):
+        if all(row_1[col] == row_2[col] for col in reference_columns):
+            # Keep the longer value; the shorter one is treated as a rerun prefix.
+            if row_2[column_to_clean].startswith(row_1[column_to_clean]):
+                to_drop.add(i)
+            elif row_1[column_to_clean].startswith(row_2[column_to_clean]):
+                to_drop.add(j)
+    return df.drop(to_drop)
+
+
+def get_regression_fails(client: Client, job_url: str):
+    """
+    Get regression tests that did not succeed for the given job URL.
+
+    Args:
+        client: ClickHouse client for the results database.
+        job_url: Actions run URL; matched as a prefix of the stored job_url.
+
+    Returns:
+        pd.DataFrame: arch, job_name, status, test_name, results_link rows,
+            with rerun-prefix duplicates removed and job names title-cased.
+    """
+    # If you rename the alias for report_url, also update the formatters in format_results_as_html_table
+    # Nested SELECT handles test reruns
+    query = f"""SELECT arch, job_name, status, test_name, results_link
+    FROM (
+        SELECT
+            architecture as arch,
+            test_name,
+            argMax(result, start_time) AS status,
+            job_name,
+            report_url as results_link,
+            job_url
+        FROM `gh-data`.clickhouse_regression_results
+        GROUP BY architecture, test_name, job_url, job_name, report_url
+        ORDER BY length(test_name) DESC
+        )
+    WHERE job_url LIKE '{job_url}%'
+    AND status IN ('Fail', 'Error')
+    """
+    df = client.query_dataframe(query)
+    df = drop_prefix_rows(df, "test_name")
+    # Normalize job-name capitalization for display.
+    df["job_name"] = df["job_name"].str.title()
+    return df
+
+
+def get_new_fails_this_pr(
+    client: Client,
+    pr_info: dict,
+    checks_fails: pd.DataFrame,
+    regression_fails: pd.DataFrame,
+):
+    """
+    Get tests that failed in the PR but passed in the base branch.
+    Compares both checks and regression test results.
+
+    Args:
+        client: ClickHouse client for the results database.
+        pr_info: GitHub PR object; only base.sha is read here.
+        checks_fails: failing checks for the PR head commit.
+        regression_fails: failing regression tests for the PR workflow run.
+
+    Returns:
+        pd.DataFrame: rows with job_name, test_name, test_status, results_link;
+            empty frame when the PR has no fails at all.
+
+    Raises:
+        Exception: when pr_info carries no base SHA.
+    """
+    base_sha = pr_info.get("base", {}).get("sha")
+    if not base_sha:
+        raise Exception("No base SHA found for PR")
+
+    # Modify tables to have the same columns
+    if len(checks_fails) > 0:
+        checks_fails = checks_fails.copy().drop(columns=["job_status"])
+    if len(regression_fails) > 0:
+        regression_fails = regression_fails.copy()
+        # Fold arch into the job name so it matches the base-branch key below.
+        regression_fails["job_name"] = regression_fails.apply(
+            lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+        )
+        regression_fails["test_status"] = regression_fails["status"]
+
+    # Combine both types of fails and select only desired columns
+    desired_columns = ["job_name", "test_name", "test_status", "results_link"]
+    all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[
+        desired_columns
+    ]
+    if len(all_pr_fails) == 0:
+        return pd.DataFrame()
+
+    # Get all checks from the base branch that didn't fail
+    base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link
+    FROM (
+        SELECT
+            check_name as job_name,
+            argMax(test_status, check_start_time) as status,
+            test_name,
+            report_url as results_link,
+            task_url
+        FROM `gh-data`.checks
+        WHERE commit_sha='{base_sha}'
+        GROUP BY check_name, test_name, report_url, task_url
+        )
+    WHERE test_status NOT IN ('FAIL', 'ERROR')
+    ORDER BY job_name, test_name
+    """
+    base_checks = client.query_dataframe(base_checks_query)
+
+    # Get regression results from base branch that didn't fail.
+    # NOTE(review): base results are located by results_link containing the
+    # base SHA rather than by job_url — confirm this matches the uploader.
+    base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link
+    FROM (
+        SELECT
+            architecture as arch,
+            test_name,
+            argMax(result, start_time) AS status,
+            job_url,
+            job_name,
+            report_url as results_link
+        FROM `gh-data`.clickhouse_regression_results
+        WHERE results_link LIKE'%/{base_sha}/%'
+        GROUP BY architecture, test_name, job_url, job_name, report_url
+        ORDER BY length(test_name) DESC
+        )
+    WHERE status NOT IN ('Fail', 'Error')
+    """
+    base_regression = client.query_dataframe(base_regression_query)
+    if len(base_regression) > 0:
+        base_regression["job_name"] = base_regression.apply(
+            lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+        )
+        base_regression["test_status"] = base_regression["status"]
+        base_regression = base_regression.drop(columns=["arch", "status"])
+
+    # Combine base results
+    base_results = pd.concat([base_checks, base_regression], ignore_index=True)
+
+    # Find tests that failed in PR but passed in base
+    pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"]))
+    base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"]))
+
+    new_fails = pr_failed_tests.intersection(base_passed_tests)
+
+    # Filter PR results to only include new fails
+    mask = all_pr_fails.apply(
+        lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1
+    )
+    new_fails_df = all_pr_fails[mask]
+
+    return new_fails_df
+
+
+def get_cves(pr_number, commit_sha):
+    """
+    Fetch Grype results from S3.
+
+    Reads result.json files found under {pr_number}/{commit_sha}/grype/*/ in
+    S3_BUCKET (endpoint overridable via the S3_URL environment variable).
+
+    If no results are available for download, returns ... (Ellipsis).
+
+    Returns:
+        pd.DataFrame: one row per unique (image, severity, id, namespace),
+            sorted most-severe first; empty frame when scans found no matches.
+    """
+    s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+    s3_prefix = f"{pr_number}/{commit_sha}/grype/"
+
+    results = []
+
+    # Delimiter="/" lists one CommonPrefix per scanned image directory.
+    response = s3_client.list_objects_v2(
+        Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/"
+    )
+    grype_result_dirs = [
+        content["Prefix"] for content in response.get("CommonPrefixes", [])
+    ]
+
+    if len(grype_result_dirs) == 0:
+        # We were asked to check the CVE data, but none was found,
+        # maybe this is a preview report and grype results are not available yet
+        return ...
+
+    for path in grype_result_dirs:
+        file_key = f"{path}result.json"
+        file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key)
+        content = file_response["Body"].read().decode("utf-8")
+        results.append(json.loads(content))
+
+    rows = []
+    for scan_result in results:
+        for match in scan_result["matches"]:
+            rows.append(
+                {
+                    "docker_image": scan_result["source"]["target"]["userInput"],
+                    "severity": match["vulnerability"]["severity"],
+                    "identifier": match["vulnerability"]["id"],
+                    "namespace": match["vulnerability"]["namespace"],
+                }
+            )
+
+    if len(rows) == 0:
+        return pd.DataFrame()
+
+    df = pd.DataFrame(rows).drop_duplicates()
+    # Severities outside the map become NaN and sort last (pandas default
+    # na_position='last'), so unknown severities end up at the bottom.
+    df = df.sort_values(
+        by="severity",
+        key=lambda col: col.str.lower().map(
+            {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5}
+        ),
+    )
+    return df
+
+
+def url_to_html_link(url: str) -> str:
+ if not url:
+ return ""
+ text = url.split("/")[-1].split("?")[0]
+ if not text:
+ text = "results"
+ return f'{text} '
+
+
+def format_test_name_for_linewrap(text: str) -> str:
+ """Tweak the test name to improve line wrapping."""
+ return f'{text} '
+
+
+def format_test_status(text: str) -> str:
+ """Format the test status for better readability."""
+ color = (
+ "red"
+ if text.lower().startswith("fail")
+ else "orange" if text.lower() in ("error", "broken", "pending") else "green"
+ )
+ return f'{text} '
+
+
+def format_results_as_html_table(results) -> str:
+ if len(results) == 0:
+ return "Nothing to report
"
+ results.columns = [col.replace("_", " ").title() for col in results.columns]
+ html = results.to_html(
+ index=False,
+ formatters={
+ "Results Link": url_to_html_link,
+ "Test Name": format_test_name_for_linewrap,
+ "Test Status": format_test_status,
+ "Job Status": format_test_status,
+ "Status": format_test_status,
+ "Message": lambda m: m.replace("\n", " "),
+ "Identifier": lambda i: url_to_html_link(
+ "https://nvd.nist.gov/vuln/detail/" + i
+ ),
+ },
+ escape=False,
+ border=0,
+ classes=["test-results-table"],
+ )
+ return html
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse command-line arguments for the report generator.
+
+    --pr-number and --commit-sha are optional; main() derives them from the
+    actions run when omitted.
+    """
+    parser = argparse.ArgumentParser(description="Create a combined CI report.")
+    parser.add_argument(  # Need the full URL rather than just the ID to query the databases
+        "--actions-run-url", required=True, help="URL of the actions run"
+    )
+    parser.add_argument(
+        "--pr-number", help="Pull request number for the S3 path", type=int
+    )
+    parser.add_argument("--commit-sha", help="Commit SHA for the S3 path")
+    parser.add_argument(
+        "--no-upload", action="store_true", help="Do not upload the report"
+    )
+    parser.add_argument(
+        "--known-fails", type=str, help="Path to the file with known fails"
+    )
+    parser.add_argument(
+        "--cves", action="store_true", help="Get CVEs from Grype results"
+    )
+    parser.add_argument(
+        "--mark-preview", action="store_true", help="Mark the report as a preview"
+    )
+    return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
+ if args.pr_number is None or args.commit_sha is None:
+ run_details = get_run_details(args.actions_run_url)
+ if args.pr_number is None:
+ if len(run_details["pull_requests"]) > 0:
+ args.pr_number = run_details["pull_requests"][0]["number"]
+ else:
+ args.pr_number = 0
+ if args.commit_sha is None:
+ args.commit_sha = run_details["head_commit"]["id"]
+
+ db_client = Client(
+ host=os.getenv(DATABASE_HOST_VAR),
+ user=os.getenv(DATABASE_USER_VAR),
+ password=os.getenv(DATABASE_PASSWORD_VAR),
+ port=9440,
+ secure="y",
+ verify=False,
+ settings={"use_numpy": True},
+ )
+
+ run_details = get_run_details(args.actions_run_url)
+ branch_name = run_details.get("head_branch", "unknown branch")
+
+ fail_results = {
+ "job_statuses": get_commit_statuses(args.commit_sha),
+ "checks_fails": get_checks_fails(db_client, args.commit_sha, branch_name),
+ "checks_known_fails": [],
+ "pr_new_fails": [],
+ "checks_errors": get_checks_errors(db_client, args.commit_sha, branch_name),
+ "regression_fails": get_regression_fails(db_client, args.actions_run_url),
+ "docker_images_cves": (
+ [] if not args.cves else get_cves(args.pr_number, args.commit_sha)
+ ),
+ }
+
+ # get_cves returns ... in the case where no Grype result files were found.
+ # This might occur when run in preview mode.
+ cves_not_checked = not args.cves or fail_results["docker_images_cves"] is ...
+
+ if args.known_fails:
+ if not os.path.exists(args.known_fails):
+ print(f"Known fails file {args.known_fails} not found.")
+ exit(1)
+
+ with open(args.known_fails) as f:
+ known_fails = json.load(f)
+
+ if known_fails:
+ fail_results["checks_known_fails"] = get_checks_known_fails(
+ db_client, args.commit_sha, branch_name, known_fails
+ )
+
+ if args.pr_number == 0:
+ pr_info_html = f"Release ({branch_name})"
+ else:
+ try:
+ pr_info = get_pr_info_from_number(args.pr_number)
+ pr_info_html = f"""
+ #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")}
+ """
+ fail_results["pr_new_fails"] = get_new_fails_this_pr(
+ db_client,
+ pr_info,
+ fail_results["checks_fails"],
+ fail_results["regression_fails"],
+ )
+ except Exception as e:
+ pr_info_html = e
+
+ high_cve_count = 0
+ if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0:
+ high_cve_count = (
+ fail_results["docker_images_cves"]["severity"]
+ .str.lower()
+ .isin(("high", "critical"))
+ .sum()
+ )
+
+ # Define the context for rendering
+ context = {
+ "title": "ClickHouse® CI Workflow Run Report",
+ "github_repo": GITHUB_REPO,
+ "s3_bucket": S3_BUCKET,
+ "pr_info_html": pr_info_html,
+ "pr_number": args.pr_number,
+ "workflow_id": args.actions_run_url.split("/")[-1],
+ "commit_sha": args.commit_sha,
+ "base_sha": "" if args.pr_number == 0 else pr_info.get("base", {}).get("sha"),
+ "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC",
+ "is_preview": args.mark_preview,
+ "counts": {
+ "jobs_status": f"{sum(fail_results['job_statuses']['job_status'] != 'success')} fail/error",
+ "checks_errors": len(fail_results["checks_errors"]),
+ "checks_new_fails": len(fail_results["checks_fails"]),
+ "regression_new_fails": len(fail_results["regression_fails"]),
+ "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical",
+ "checks_known_fails": (
+ "N/A"
+ if not args.known_fails
+ else len(fail_results["checks_known_fails"])
+ ),
+ "pr_new_fails": len(fail_results["pr_new_fails"]),
+ },
+ "ci_jobs_status_html": format_results_as_html_table(
+ fail_results["job_statuses"]
+ ),
+ "checks_errors_html": format_results_as_html_table(
+ fail_results["checks_errors"]
+ ),
+ "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]),
+ "regression_fails_html": format_results_as_html_table(
+ fail_results["regression_fails"]
+ ),
+ "docker_images_cves_html": (
+ "Not Checked
"
+ if cves_not_checked
+ else format_results_as_html_table(fail_results["docker_images_cves"])
+ ),
+ "checks_known_fails_html": (
+ "Not Checked
"
+ if not args.known_fails
+ else format_results_as_html_table(fail_results["checks_known_fails"])
+ ),
+ "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]),
+ }
+
+ # Render the template with the context
+ rendered_html = template.render(context)
+
+ report_name = "ci_run_report.html"
+ report_path = Path(report_name)
+ report_path.write_text(rendered_html, encoding="utf-8")
+
+ if args.no_upload:
+ print(f"Report saved to {report_path}")
+ exit(0)
+
+ report_destination_key = f"{args.pr_number}/{args.commit_sha}/{report_name}"
+
+ # Upload the report to S3
+ s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+
+ try:
+ s3_client.put_object(
+ Bucket=S3_BUCKET,
+ Key=report_destination_key,
+ Body=rendered_html,
+ ContentType="text/html; charset=utf-8",
+ )
+ except NoCredentialsError:
+ print("Credentials not available for S3 upload.")
+
+ print(f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml
new file mode 100644
index 000000000000..60c9a17519a6
--- /dev/null
+++ b/.github/actions/docker_setup/action.yml
@@ -0,0 +1,31 @@
+name: Docker setup
+description: Setup docker
+inputs:
+  test_name:
+    description: name of the test, used in determining ipv6 configs.
+    default: None
+    type: string
+runs:
+  using: "composite"
+  steps:
+    - name: Docker IPv6 configuration
+      shell: bash
+      env:
+        ipv6_subnet: ${{ contains(inputs.test_name, 'Integration') && '2001:db8:1::/64' || '2001:3984:3989::/64' }}
+      run: |
+        # make sure docker uses proper IPv6 config
+        sudo touch /etc/docker/daemon.json
+        sudo chown ubuntu:ubuntu /etc/docker/daemon.json
+        # write the daemon config; the heredoc was garbled in extraction and
+        # is reconstructed here (terminator EOT matches the line below)
+        sudo cat <<EOT > /etc/docker/daemon.json
+        {
+          "ipv6": true,
+          "fixed-cidr-v6": "${{ env.ipv6_subnet }}"
+        }
+        EOT
+        sudo chown root:root /etc/docker/daemon.json
+        sudo systemctl restart docker
+        sudo systemctl status docker
+    - name: Docker info
+      shell: bash
+      run: |
+        docker info
diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py
new file mode 100644
index 000000000000..fec2ef3bfac7
--- /dev/null
+++ b/.github/grype/parse_vulnerabilities_grype.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# Turn a Grype JSON scan (./result.json) into a TestFlows run:
+# one test per vulnerability match; HIGH/CRITICAL severities fail.
+import json
+
+from testflows.core import *
+
+# Known/accepted failures; empty for now, kept for the @XFails decorator.
+xfails = {}
+
+
+@Name("docker vulnerabilities")
+@XFails(xfails)
+@TestModule
+def docker_vulnerabilities(self):
+    with Given("I gather grype scan results"):
+        with open("./result.json", "r") as f:
+            results = json.load(f)
+
+    for vulnerability in results["matches"]:
+        # Test name encodes id@namespace,severity so each finding is distinct.
+        with Test(
+            f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}",
+            flags=TE,  # NOTE(review): presumably "test to end" (keep running after a failure) — confirm in testflows docs
+        ):
+            note(vulnerability)
+            critical_levels = set(["HIGH", "CRITICAL"])
+            if vulnerability['vulnerability']["severity"].upper() in critical_levels:
+                with Then(
+                    f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity"
+                ):
+                    result(Fail)
+
+
+# TestFlows entry-point idiom (analogous to `if __name__ == "__main__"`).
+if main():
+    docker_vulnerabilities()
diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh
new file mode 100755
index 000000000000..af428e37d669
--- /dev/null
+++ b/.github/grype/run_grype_scan.sh
@@ -0,0 +1,18 @@
+set -x
+set -e
+
+IMAGE=$1
+
+GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"}
+
+docker pull $IMAGE
+docker pull anchore/grype:${GRYPE_VERSION}
+
+docker run \
+ --rm --volume /var/run/docker.sock:/var/run/docker.sock \
+ --name Grype anchore/grype:${GRYPE_VERSION} \
+ --scope all-layers \
+ -o json \
+ $IMAGE > result.json
+
+ls -sh
diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh
new file mode 100755
index 000000000000..7a10b02887ef
--- /dev/null
+++ b/.github/grype/transform_and_upload_results_s3.sh
@@ -0,0 +1,13 @@
+# Transform TestFlows logs and upload Grype artifacts to S3.
+# Expects env vars: DOCKER_IMAGE, S3_BUCKET, PR_NUMBER, COMMIT_SHA,
+# GITHUB_OUTPUT, plus AWS credentials for `aws s3 cp`.
+# Expects raw.log and result.json in the current directory.
+
+# Sanitize the image name ("/" and ":" -> "_") for use as an S3 path segment.
+DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g')
+
+S3_PATH="s3://$S3_BUCKET/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+HTTPS_S3_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+# Expose the public URL to later workflow steps.
+echo "https_s3_path=$HTTPS_S3_PATH" >> $GITHUB_OUTPUT
+
+# Convert the raw TestFlows log into human-readable text and an HTML report.
+tfs --no-colors transform nice raw.log nice.log.txt
+tfs --no-colors report results -a $HTTPS_S3_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html
+
+# Best-effort uploads: a missing artifact logs a message instead of failing.
+aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found".
+aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found".
+aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found".
+aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found".
\ No newline at end of file
diff --git a/.github/retry.sh b/.github/retry.sh
new file mode 100755
index 000000000000..566c2cf11315
--- /dev/null
+++ b/.github/retry.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Execute command until exitcode is 0 or
+# maximum number of retries is reached
+# Example:
+# ./retry
+retries=$1
+delay=$2
+command="${@:3}"
+exitcode=0
+try=0
+until [ "$try" -ge $retries ]
+do
+ echo "$command"
+ eval "$command"
+ exitcode=$?
+ if [ $exitcode -eq 0 ]; then
+ break
+ fi
+ try=$((try+1))
+ sleep $2
+done
+exit $exitcode
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 000000000000..56415c2a7478
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,13 @@
+## Scheduled Build Run Results
+
+Results for **the latest** release_workflow scheduled runs.
+
+| Branch | Status |
+| ------------ | - |
| **`antalya`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) |
| **`project-antalya-24.12.2`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) |
| **`customizations/22.8.21`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F22.8.21)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) |
| **`customizations/23.3.19`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F23.3.19)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) |
| **`customizations/23.8.16`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F23.8.16)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) |
| **`customizations/24.3.14`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F24.3.14)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) |
| **`customizations/24.8.11`** | [![Build Status](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F24.8.11)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) |
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 1f3f219946f7..3a819abbdbd5 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -1,1196 +1,281 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: BackportPR
-on:
- pull_request:
- branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }}
- DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }}
- CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }}
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+on: # yamllint disable-line rule:truthy
+ push:
+ branches:
+ - 'backport/**'
-jobs:
+# Cancel the previous wf run in PRs.
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
- config_workflow:
+jobs:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Labels check
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 run_check.py
+ - name: Python unit tests
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ echo "Testing the main ci directory"
+ python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ echo "::group::configure CI run"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+ echo "::group::CI run configure results"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+          {
+            echo 'CI_DATA<<EOF'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'EOF'
+          } >> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/docker_test_images.yml
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckX86:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Compatibility check (release)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Compatibility check (aarch64)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+#########################################################################################
+#################################### ORDINARY BUILDS ####################################
+#########################################################################################
+ BuilderDebRelease:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_release
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_aarch64
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ runner_type: builder-aarch64
+ BuilderDebAsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_asan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebTsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_tsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebDebug:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: package_debug
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderBinDarwin:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: binary_darwin
+ data: ${{ needs.RunConfig.outputs.data }}
+ checkout_depth: 0
+ BuilderBinDarwinAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ with:
+ build_name: binary_darwin_aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ checkout_depth: 0
+ runner_type: builder-aarch64
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+ DockerServerImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Docker server image
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ DockerKeeperImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Docker keeper image
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+############################################################################################
+##################################### BUILD REPORTER #######################################
+############################################################################################
+ Builds_Report:
+ # run report check for failed builds to indicate the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebAarch64, BuilderDebAsan, BuilderDebDebug, BuilderDebRelease, BuilderDebTsan, BuilderBinDarwin, BuilderBinDarwinAarch64]
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
+ - name: Check out repository code
+        uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f  # NOTE(review): ref is 39 hex chars — GitHub requires a branch, tag, or full 40-char commit SHA; verify this pin
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_tsan package_debug binary_darwin binary_darwin_aarch64
+ - name: Set status
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
+############################################################################################
+#################################### INSTALL PACKAGES ######################################
+############################################################################################
+ InstallPackagesTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Install packages (release)
+ runner_type: style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+ InstallPackagesTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Install packages (aarch64)
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+##############################################################################################
+########################### FUNCTIONAL STATELESS TESTS #######################################
+##############################################################################################
+ FunctionalStatelessTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Stateless tests (asan)
+ runner_type: func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
+######################################### STRESS TESTS #######################################
+##############################################################################################
+ StressTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Stress test (tsan)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+############################# INTEGRATION TESTS #############################################
+#############################################################################################
+ IntegrationTestsAsanOldAnalyzer:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Integration tests (asan, old analyzer)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Integration tests (tsan)
+ runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04
+ data: ${{ needs.RunConfig.outputs.data }}
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs:
+ - RunConfig
+ - Builds_Report
+ - FunctionalStatelessTestAsan
+ - StressTestTsan
+ - IntegrationTestsTsan
+ - IntegrationTestsAsanOldAnalyzer
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+        uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f  # NOTE(review): ref is 39 hex chars — GitHub requires a branch, tag, or full 40-char commit SHA; verify this pin
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ clear-repository: true
+ - name: Finish label
+ if: ${{ !failure() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ # update mergeable check
+ python3 merge_pr.py --set-ci-status
+ # update overall ci report
+ python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ python3 merge_pr.py
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
+ run: |
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (release)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAxLzIp') }}
- name: "Stateless tests (asan, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAyLzIp') }}
- name: "Stateless tests (asan, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_backportpr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "BackportPR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "BackportPR" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml
new file mode 100644
index 000000000000..46ff5794b5ba
--- /dev/null
+++ b/.github/workflows/cancel.yml
@@ -0,0 +1,19 @@
+name: Cancel
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+
+on: # yamllint disable-line rule:truthy
+ workflow_run:
+ workflows: ["PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
+ types:
+ - requested
+jobs:
+ cancel:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: styfle/cancel-workflow-action@0.9.1
+ with:
+ all_but_latest: true
+ workflow_id: ${{ github.event.workflow.id }}
diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml
index 315673d4abcc..8e5191eb33cc 100644
--- a/.github/workflows/cherry_pick.yml
+++ b/.github/workflows/cherry_pick.yml
@@ -28,7 +28,7 @@ jobs:
REPO_TEAM=core
EOF
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
diff --git a/.github/workflows/compare_fails.yml b/.github/workflows/compare_fails.yml
new file mode 100644
index 000000000000..5dbfb9776a47
--- /dev/null
+++ b/.github/workflows/compare_fails.yml
@@ -0,0 +1,104 @@
+name: Compare CI Failures
+
+on:
+ workflow_dispatch:
+ inputs:
+ current_ref:
+ description: 'Current reference (commit hash or git tag) (default: current commit on selected branch)'
+ required: false
+ type: string
+ previous_ref:
+ description: 'Previous reference to compare with (commit hash, git tag or workflow url) (default: previous stable tag for current reference)'
+ required: false
+ type: string
+ upstream_ref:
+ description: 'Upstream reference to compare with (commit hash, git tag or MAJOR.MINOR version) (default: previous lts tag for current reference)'
+ required: false
+ type: string
+ include_broken:
+ description: 'Include BROKEN tests in comparison'
+ required: false
+ type: boolean
+ default: false
+ push:
+ tags:
+ - 'v*.altinity*'
+
+env:
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+
+jobs:
+ Compare:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check commit status
+ run: |
+ if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ inputs.current_ref }}" ]]; then
+ # For workflow_dispatch with custom ref, skip the check
+ exit 0
+ fi
+
+ # Query GitHub API for commit status
+ STATUSES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/status")
+
+ # Check if there are any statuses
+ if [ "$(echo $STATUSES | jq '.total_count')" -eq 0 ]; then
+ echo "No commit statuses found for ${{ github.sha }}. Assuming tests have not run yet. Aborting workflow."
+ exit 1
+ fi
+
+ echo "Found commit statuses, proceeding with comparison."
+
+ - name: Check out repository code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ ref: ${{ inputs.current_ref || github.ref }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+ cache: 'pip'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install clickhouse-driver requests pandas tabulate
+
+ - name: Set default refs
+ id: default_refs
+ run: |
+ VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/')
+ echo "Detected version: $VERSION"
+
+ PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags=v${VERSION}*.altinity* | grep -Po "(?<=commit ).*")
+ echo "PREVIOUS_TAG: $(git tag --contains $PREVIOUS_TAG_COMMIT | grep 'altinity') $PREVIOUS_TAG_COMMIT"
+ UPSTREAM_TAG_COMMIT=$(git log -1 --tags=v${VERSION}*-lts | grep -Po "(?<=commit ).*")
+ echo "UPSTREAM_TAG: $(git tag --contains $UPSTREAM_TAG_COMMIT | grep 'lts') $UPSTREAM_TAG_COMMIT"
+
+ echo "PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT
+ echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT
+
+ - name: Comparison report
+ if: ${{ !cancelled() }}
+ run: |
+ git clone https://github.com/Altinity/actions.git
+ cd actions
+ git checkout c5751cefd4f56bd7300b5f6d84a5ae9d0b686772
+ python3 scripts/compare_ci_fails.py \
+ --current-ref ${{ inputs.current_ref || github.sha }} \
+ --previous-ref ${{ inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \
+ --upstream-ref ${{ inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \
+ ${{ inputs.include_broken && '--broken' || '' }}
+ cat comparison_results.md >> $GITHUB_STEP_SUMMARY
+
+ - name: Upload comparison results
+ uses: actions/upload-artifact@v4
+ with:
+ name: comparison-results
+ path: |
+ actions/comparison_results.md
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index b53bf1720963..da8197e3de80 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -70,7 +70,7 @@ jobs:
runs-on: [self-hosted, release-maker]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml
new file mode 100644
index 000000000000..65ca24cc1354
--- /dev/null
+++ b/.github/workflows/docker_publish.yml
@@ -0,0 +1,120 @@
+name: Republish Multiarch Docker Image
+
+on:
+ workflow_dispatch:
+ inputs:
+ docker_image:
+ description: 'Multiarch Docker image with tag'
+ required: true
+ release_environment:
+ description: 'Select release type: "staging" or "production"'
+ type: choice
+ default: 'staging'
+ options:
+ - staging
+ - production
+ upload_artifacts:
+ description: 'Upload artifacts directly in this workflow'
+ type: boolean
+ default: true
+ workflow_call:
+ inputs:
+ docker_image:
+ type: string
+ required: true
+ release_environment:
+ type: string
+ required: false
+ default: 'staging'
+ upload_artifacts:
+ type: boolean
+ required: false
+ default: false
+ outputs:
+ image_archives_path:
+ description: 'Path to the image archives directory'
+ value: ${{ jobs.republish.outputs.image_archives_path }}
+
+env:
+ IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }}
+
+jobs:
+ republish:
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ outputs:
+ image_archives_path: ${{ steps.set_path.outputs.image_archives_path }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Docker Hub Login
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Set clickhouse-server version as new tag
+ run: |
+ # Determine "clickhouse-server" or "clickhouse-keeper"
+ COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|'); echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV
+ echo "Component determined: $COMPONENT"
+
+ # Run the container to get the version
+ CONTAINER_HASH=$(docker run -d --rm $IMAGE 2>&1)
+ NEW_TAG=$(.github/retry.sh 30 10 docker exec $CONTAINER_HASH bash -c "$COMPONENT --version")
+ echo "Base tag from clickhouse version: $NEW_TAG"
+
+ # Append "-prerelease" if necessary
+ if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then
+ NEW_TAG="${NEW_TAG}-prerelease"
+ fi
+
+ if [[ "$IMAGE" == *-alpine* ]]; then
+ NEW_TAG="${NEW_TAG}-alpine"
+ fi
+ echo "New tag: $NEW_TAG"
+
+ # Export the new tag
+ echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV
+
+ - name: Process multiarch manifest
+ run: |
+ echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG"
+ docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE"
+
+ # Create directory for image archives
+ mkdir -p image_archives
+
+ # Pull and save platform-specific images
+ for PLATFORM in "linux/amd64" "linux/arm64"; do
+ echo "Pulling and saving image for $PLATFORM..."
+ # Pull the specific platform image
+ docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG"
+
+ # Save the image to a tar file
+ ARCH=$(echo $PLATFORM | cut -d'/' -f2)
+ docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar"
+ done
+
+ # Save manifest inspection
+ docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt
+
+ # Compress the archives
+ cd image_archives
+ for file in *.tar; do
+ gzip "$file"
+ done
+ cd ..
+
+ - name: Set image archives path
+ id: set_path
+ run: |
+ echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT
+
+ - name: Upload image archives
+ if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }}
+ uses: actions/upload-artifact@v3
+ with:
+ name: docker-images-backup
+ path: image_archives/
+ retention-days: 90
diff --git a/.github/workflows/docker_test_images.yml b/.github/workflows/docker_test_images.yml
index 4cc9e4c8a820..1b8d94279407 100644
--- a/.github/workflows/docker_test_images.yml
+++ b/.github/workflows/docker_test_images.yml
@@ -1,5 +1,4 @@
name: Build docker images
-
'on':
workflow_call:
inputs:
@@ -12,18 +11,33 @@ name: Build docker images
required: false
type: boolean
default: false
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
env:
- PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+
jobs:
DockerBuildAarch64:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -33,12 +47,12 @@ jobs:
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
DockerBuildAmd64:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -49,12 +63,12 @@ jobs:
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
DockerMultiArchManifest:
needs: [DockerBuildAmd64, DockerBuildAarch64]
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
if: |
!failure() && !cancelled() && (toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]' || inputs.set_latest)
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
@@ -65,7 +79,9 @@ jobs:
FLAG_LATEST='--set-latest'
echo "latest tag will be set for resulting manifests"
fi
+ # NOTE(strtgbb): The --no-reports flag avoids a strange error setting the commit status
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
+ --no-reports \
$FLAG_LATEST
diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml
new file mode 100644
index 000000000000..b6781c386f94
--- /dev/null
+++ b/.github/workflows/grype_scan.yml
@@ -0,0 +1,152 @@
+name: Grype Scan
+run-name: Grype Scan ${{ inputs.docker_image }}
+
+on:
+ workflow_dispatch:
+ # Inputs for manual run
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag, it will be determined by version_helper.py'
+ required: true
+ workflow_call:
+ # Inputs for workflow call
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag, it will be determined by version_helper.py'
+ required: true
+ type: string
+ version:
+ description: 'Version tag. If no version, it will be determined by version_helper.py'
+ required: false
+ type: string
+ default: ""
+ tag-suffix:
+ description: 'Tag suffix. To be appended the version from version_helper.py'
+ required: false
+ type: string
+ default: ""
+env:
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ GRYPE_VERSION: "v0.92.2-arm64v8"
+
+jobs:
+ grype_scan:
+ name: Grype Scan
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker
+ uses: docker/setup-buildx-action@v3
+
+ - name: Set up Python
+ run: |
+ export TESTFLOWS_VERSION="2.4.19"
+ sudo apt-get update
+ sudo apt-get install -y python3-pip python3-venv
+ python3 -m venv venv
+ source venv/bin/activate
+ pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub
+ pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28
+ echo PATH=$PATH >>$GITHUB_ENV
+
+ - name: Set image tag if not given
+ if: ${{ !contains(inputs.docker_image, ':') }}
+ id: set_version
+ env:
+ TAG_SUFFIX: ${{ inputs.tag-suffix }}
+ SPECIFIED_VERSION: ${{ inputs.version }}
+ run: |
+ python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info
+ source /tmp/version_info
+ if [ -z "$SPECIFIED_VERSION" ]; then
+ VERSION=$CLICKHOUSE_VERSION_STRING
+ else
+ VERSION=$SPECIFIED_VERSION
+ fi
+ echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT
+ echo "commit_sha=$CLICKHOUSE_VERSION_GITHASH" >> $GITHUB_OUTPUT
+
+ - name: Run Grype Scan
+ run: |
+ DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE
+
+ - name: Parse grype results
+ run: |
+ python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end
+
+ - name: Transform and Upload Grype Results
+ if: always()
+ id: upload_results
+ env:
+ S3_BUCKET: "altinity-build-artifacts"
+ COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }}
+ DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ run: |
+ echo "PR_NUMBER=$PR_NUMBER"
+ ./.github/grype/transform_and_upload_results_s3.sh
+
+ - name: Create step summary
+ if: always()
+ id: create_summary
+ run: |
+ jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY
+ jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY
+ if jq -e '.matches | length == 0' result.json > /dev/null; then
+ echo "No CVEs" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY
+ echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY
+ jq -r '
+ .matches |
+ map(.vulnerability.severity) |
+ group_by(.) |
+ map({severity: .[0], count: length}) |
+ sort_by(.severity) |
+ map("| \(.severity) | \(.count) |") |
+ .[]
+ ' result.json >> $GITHUB_STEP_SUMMARY
+ fi
+
+ HIGH_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "High")) | length' result.json)
+ CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability.severity) | map(select(. == "Critical")) | length' result.json)
+ TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT))
+ echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT
+
+ if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then
+ echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ fi
+
+ - name: Set commit status
+ if: always()
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ github.rest.repos.createCommitStatus({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}',
+ state: '${{ steps.create_summary.outputs.total_high_critical > 0 && 'failure' || 'success' }}',
+ target_url: '${{ steps.upload_results.outputs.https_s3_path }}/results.html',
+ description: 'Grype Scan Completed with ${{ steps.create_summary.outputs.total_high_critical }} high/critical vulnerabilities',
+ context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}'
+ })
+
+ - name: Upload artifacts
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: grype-results-${{ hashFiles('raw.log') }}
+ path: |
+ result.json
+ nice.log.txt
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 7b7d950eea16..09a6c2dbc747 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -1,4158 +1,142 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: MasterCI
-on:
- push:
- branches: ['master']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
-
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+on: # yamllint disable-line rule:truthy
+ push:
+ branches:
+ - 'master'
jobs:
-
- config_workflow:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tidy:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90aWR5KQ==') }}
- name: "Build (amd_tidy)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tidy)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tidy)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
- name: "Build (amd_msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
- name: "Build (amd_ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_coverage:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb3ZlcmFnZSk=') }}
- name: "Build (amd_coverage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_binary:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
- name: "Build (arm_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_v80compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }}
- name: "Build (arm_v80compat)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_freebsd:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }}
- name: "Build (amd_freebsd)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_ppc64le:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }}
- name: "Build (ppc64le)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }}
- name: "Build (amd_compat)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_musl:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }}
- name: "Build (amd_musl)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_riscv64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }}
- name: "Build (riscv64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_s390x:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }}
- name: "Build (s390x)"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
+ steps:
+ - name: Check out repository code
+      uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Merge sync PR
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 sync_pr.py --merge || :
+# Runs in MQ:
+# - name: Python unit tests
+# run: |
+# cd "$GITHUB_WORKSPACE/tests/ci"
+# echo "Testing the main ci directory"
+# python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+
+ echo "::group::CI configuration"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+ {
+ echo 'CI_DATA<> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+# Runs in MQ:
+# BuildDockers:
+# needs: [RunConfig]
+# if: ${{ !failure() && !cancelled() }}
+# uses: ./.github/workflows/docker_test_images.yml
+# with:
+# data: ${{ needs.RunConfig.outputs.data }}
+ # StyleCheck:
+ # needs: [RunConfig, BuildDockers]
+ # if: ${{ !failure() && !cancelled() }}
+ # uses: ./.github/workflows/reusable_test.yml
+ # with:
+ # test_name: Style check
+ # runner_type: style-checker
+ # data: ${{ needs.RunConfig.outputs.data }}
+ # run_command: |
+ # python3 style_check.py --no-push
+
+ ################################# Main stages #################################
+ # for main CI chain
+ #
+ Builds_1:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+ # using callable wf (reusable_stage.yml) allows grouping all nested jobs under a tab
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_1:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Builds_2:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_2
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_2_ww:
+ needs: [RunConfig, Builds_2]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2_ww') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_2_ww
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_2:
+ # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_2
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ ################################# Reports #################################
+ # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
+ Builds_Report:
+ # run report check for failed builds to indicate the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, Builds_1, Builds_2]
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Builds
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2]
+ runs-on: [self-hosted, style-checker-aarch64]
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ - name: Check out repository code
+        uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Finish label
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_loongarch64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }}
- name: "Build (loongarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_fuzzers:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, build_amd_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }}
- name: "Build (fuzzers)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_binary:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYmluYXJ5KQ==') }}
- name: "Unit tests (binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
- name: "Unit tests (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
- name: "Unit tests (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
- name: "Unit tests (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- unit_tests_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
- name: "Unit tests (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAxLzIp') }}
- name: "Stateless tests (asan, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_asan_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhc2FuLCAyLzIp') }}
- name: "Stateless tests (asan, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (asan, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlKQ==') }}
- name: "Stateless tests (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_old_analyzer_s3_databasereplicated_1_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBvbGQgYW5hbHl6ZXIsIHMzLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIDEvMik=') }}
- name: "Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_old_analyzer_s3_databasereplicated_2_2:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBvbGQgYW5hbHl6ZXIsIHMzLCBEYXRhYmFzZVJlcGxpY2F0ZWQsIDIvMik=') }}
- name: "Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, old analyzer, s3, DatabaseReplicated, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_release_parallelreplicas_s3_storage:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChyZWxlYXNlLCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlKQ==') }}
- name: "Stateless tests (release, ParallelReplicas, s3 storage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (release, ParallelReplicas, s3 storage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (release, ParallelReplicas, s3 storage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChkZWJ1Zyk=') }}
- name: "Stateless tests (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAxLzMp') }}
- name: "Stateless tests (tsan, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAyLzMp') }}
- name: "Stateless tests (tsan, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCAzLzMp') }}
- name: "Stateless tests (tsan, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAxLzQp') }}
- name: "Stateless tests (msan, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAyLzQp') }}
- name: "Stateless tests (msan, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCAzLzQp') }}
- name: "Stateless tests (msan, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_msan_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChtc2FuLCA0LzQp') }}
- name: "Stateless tests (msan, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (msan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (msan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh1YnNhbik=') }}
- name: "Stateless tests (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_debug_s3_storage:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChkZWJ1ZywgczMgc3RvcmFnZSk=') }}
- name: "Stateless tests (debug, s3 storage)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (debug, s3 storage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (debug, s3 storage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAxLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAyLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_tsan_s3_storage_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzICh0c2FuLCBzMyBzdG9yYWdlLCAzLzMp') }}
- name: "Stateless tests (tsan, s3 storage, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (tsan, s3 storage, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_aarch64:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhYXJjaDY0KQ==') }}
- name: "Stateless tests (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMS8zKQ==') }}
- name: "Stateless tests (azure, asan, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMi8zKQ==') }}
- name: "Stateless tests (azure, asan, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_azure_asan_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhenVyZSwgYXNhbiwgMy8zKQ==') }}
- name: "Stateless tests (azure, asan, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (azure, asan, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (azure, asan, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDEvNCk=') }}
- name: "Integration tests (release, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDIvNCk=') }}
- name: "Integration tests (release, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDMvNCk=') }}
- name: "Integration tests (release, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDQvNCk=') }}
- name: "Integration tests (release, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_1_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDEvNCk=') }}
- name: "Integration tests (aarch64, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_2_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDIvNCk=') }}
- name: "Integration tests (aarch64, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_3_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDMvNCk=') }}
- name: "Integration tests (aarch64, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_aarch64_4_4:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFhcmNoNjQsIDQvNCk=') }}
- name: "Integration tests (aarch64, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (aarch64, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (aarch64, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMS82KQ==') }}
- name: "Stateless tests (coverage, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMi82KQ==') }}
- name: "Stateless tests (coverage, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgMy82KQ==') }}
- name: "Stateless tests (coverage, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNC82KQ==') }}
- name: "Stateless tests (coverage, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNS82KQ==') }}
- name: "Stateless tests (coverage, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_coverage_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_coverage, stateless_tests_asan_1_2, stateless_tests_asan_2_2]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChjb3ZlcmFnZSwgNi82KQ==') }}
- name: "Stateless tests (coverage, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (coverage, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (coverage, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGRlYnVnKQ==') }}
- name: "Stress test (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFzYW4p') }}
- name: "Stress test (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHVic2FuKQ==') }}
- name: "Stress test (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKG1zYW4p') }}
- name: "Stress test (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_azure_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCB0c2FuKQ==') }}
- name: "Stress test (azure, tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_azure_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBtc2FuKQ==') }}
- name: "Stress test (azure, msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- clickbench_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAocmVsZWFzZSk=') }}
- name: "ClickBench (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'ClickBench (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- clickbench_aarch64:
- runs-on: [self-hosted, func-tester-aarch64]
- needs: [config_workflow, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYWFyY2g2NCk=') }}
- name: "ClickBench (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'ClickBench (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoZGVidWcp') }}
- name: "AST fuzzer (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYXNhbik=') }}
- name: "AST fuzzer (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAodHNhbik=') }}
- name: "AST fuzzer (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAobXNhbik=') }}
- name: "AST fuzzer (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- ast_fuzzer_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAodWJzYW4p') }}
- name: "AST fuzzer (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'AST fuzzer (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChkZWJ1Zyk=') }}
- name: "BuzzHouse (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhc2FuKQ==') }}
- name: "BuzzHouse (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlICh0c2FuKQ==') }}
- name: "BuzzHouse (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChtc2FuKQ==') }}
- name: "BuzzHouse (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- buzzhouse_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlICh1YnNhbik=') }}
- name: "BuzzHouse (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'BuzzHouse (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_1_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMS8zKQ==') }}
- name: "Performance Comparison (release, 1/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_2_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMi8zKQ==') }}
- name: "Performance Comparison (release, 2/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- performance_comparison_release_3_3:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAocmVsZWFzZSwgMy8zKQ==') }}
- name: "Performance Comparison (release, 3/3)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (release, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Performance Comparison (release, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqlancer_release:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKHJlbGVhc2Up') }}
- name: "SQLancer (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLancer (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLancer (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqlancer_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKGRlYnVnKQ==') }}
- name: "SQLancer (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLancer (debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLancer (debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- sqltest:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMVGVzdA==') }}
- name: "SQLTest"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'SQLTest' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml
index 31898d1471b0..34061130aeb3 100644
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -1,279 +1,114 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: MergeQueueCI
-on:
- merge_group:
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
+on: # yamllint disable-line rule:truthy
+ merge_group:
jobs:
-
- config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
+ RunConfig:
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
outputs:
- data: ${{ steps.run.outputs.DATA }}
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get a version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: Cancel PR workflow
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
+ - name: Python unit tests
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ echo "Testing the main ci directory"
+ python3 -m unittest discover -s . -p 'test_*.py'
+ - name: PrepareRunConfig
+ id: runconfig
run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- style_check:
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+
+ echo "::group::CI configuration"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+
+          {
+            echo 'CI_DATA<<CI_DATA_EOF'
+            cat ${{ runner.temp }}/ci_run_data.json
+            echo 'CI_DATA_EOF'
+          } >> "$GITHUB_OUTPUT"
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
+ uses: ./.github/workflows/docker_test_images.yml
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ StyleCheck:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Style check
+ runner_type: altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce
+ run_command: |
+ python3 style_check.py --no-push
+ data: ${{ needs.RunConfig.outputs.data }}
+ FastTest:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
+ uses: ./.github/workflows/reusable_test.yml
+ with:
+ test_name: Fast test
+ runner_type: builder
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 fast_test_check.py
+
+ Builds_1:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+ # using callable wf (reusable_stage.yml) allows grouping all nested jobs under a tab
+ uses: ./.github/workflows/reusable_build_stage.yml
+ with:
+ stage: Builds_1
+ data: ${{ needs.RunConfig.outputs.data }}
+ Tests_1:
+ needs: [RunConfig, Builds_1]
+ if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+ uses: ./.github/workflows/reusable_test_stage.yml
+ with:
+ stage: Tests_1
+ data: ${{ needs.RunConfig.outputs.data }}
+
+ CheckReadyForMerge:
+ if: ${{ !cancelled() }}
+ # Test_2 or Test_3 must not have jobs required for Mergeable check
+ needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgY2hlY2s=') }}
- name: "Style check"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- fast_test:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }}
- name: "Fast test"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Fast test' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Fast test' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
+ - name: Check and set merge status
+ if: ${{ needs.StyleCheck.result == 'success' }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_arm, dockers_build_amd_and_merge, style_check, fast_test, build_amd_binary]
- if: ${{ !cancelled() }}
- name: "Finish Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 merge_pr.py --set-ci-status
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
${{ toJson(needs) }}
EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Finish Workflow' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Finish Workflow' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index ace191fc09a3..87cc85e0e9e3 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -16,7 +16,7 @@ jobs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
clear-repository: true # to ensure correct digests
fetch-depth: 0 # to get version
@@ -80,7 +80,7 @@ jobs:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
- name: Check Workflow results
if: ${{ !cancelled() }}
run: |
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 5f365a5c3e1f..6e56ae9edd70 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -19,7 +19,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-app-docker-ce]
needs: []
name: "Config Workflow"
outputs:
diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
new file mode 100644
index 000000000000..965418f820d3
--- /dev/null
+++ b/.github/workflows/regression.yml
@@ -0,0 +1,731 @@
+name: Regression test workflow - Release
+'on':
+ workflow_call:
+ inputs:
+ runner_type:
+ description: the label of runner to use, can be a simple string or a comma-separated list
+ required: true
+ type: string
+ commit:
+ description: commit hash of the regression tests.
+ required: true
+ type: string
+ arch:
+ description: arch to run the tests on.
+ required: true
+ type: string
+ timeout_minutes:
+ description: Maximum number of minutes to let workflow run before GitHub cancels it.
+ default: 210
+ type: number
+ build_sha:
+ description: commit sha of the workflow run for artifact upload.
+ required: true
+ type: string
+ checkout_depth:
+ description: the value of the git shallow checkout
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: if the submodules should be checked out
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+ description: additional ENV variables to setup the job
+ type: string
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ AWS_DEFAULT_REGION:
+ description: the region of the aws param store.
+ required: true
+ AWS_REPORT_KEY_ID:
+ description: aws s3 key id used for regression test reports.
+ required: true
+ AWS_REPORT_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression test reports.
+ required: true
+ AWS_REPORT_REGION:
+ description: aws s3 region used for regression test reports.
+ required: true
+ DOCKER_USERNAME:
+ description: username of the docker user.
+ required: true
+ DOCKER_PASSWORD:
+ description: password to the docker user.
+ required: true
+ REGRESSION_AWS_S3_BUCKET:
+ description: aws s3 bucket used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_KEY_ID:
+ description: aws s3 key id used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_REGION:
+ description: aws s3 region used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_ID:
+ description: gcs key id used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_SECRET:
+ description: gcs key secret used for regression tests.
+ required: true
+ REGRESSION_GCS_URI:
+ description: gcs uri used for regression tests.
+ required: true
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ args: --test-to-end
+ --no-colors
+ --local
+ --collect-service-logs
+ --output classic
+ --parallel 1
+ --log raw.log
+ --with-analyzer
+ artifacts: builds
+ artifact_paths: |
+ ./report.html
+ ./*.log.txt
+ ./*.log
+ ./*.html
+ ./*/_instances/*.log
+ ./*/_instances/*/logs/*.log
+ ./*/*/_instances/*/logs/*.log
+ ./*/*/_instances/*.log
+ build_sha: ${{ inputs.build_sha }}
+ pr_number: ${{ github.event.number }}
+ event_name: ${{ github.event_name }}
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Common:
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [aes_encryption, aggregate_functions, atomic_insert, base_58, clickhouse_keeper, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, iceberg, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, rbac, selects, session_timezone, ssl_server, swarms, tiered_storage, version, window_functions]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=${{ matrix.SUITE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Alter:
+ strategy:
+ fail-fast: false
+ matrix:
+ ONLY: [replace, attach, move]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=alter
+ STORAGE=/${{ matrix.ONLY }}_partition
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u alter/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --only "/alter/${{ matrix.ONLY }} partition/*"
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.ONLY }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Alter ${{ matrix.ONLY }} partition"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: alter-${{ matrix.ONLY }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Benchmark:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3, gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ontime_benchmark
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/benchmark.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Benchmark ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: benchmark-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ClickHouseKeeperSSL:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=clickhouse_keeper
+ STORAGE=/ssl
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --ssl
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Clickhouse Keeper SSL"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-ssl-artifacts
+ path: ${{ env.artifact_paths }}
+
+ LDAP:
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [authentication, external_user_directory, role_mapping]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ldap/${{ matrix.SUITE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} LDAP ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ldap-${{ matrix.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Parquet:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ParquetS3:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+ STORAGE=${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ env.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ S3:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3, gcs, azure]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=s3
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --azure-account-name ${{ secrets.AZURE_ACCOUNT_NAME }}
+ --azure-storage-key ${{ secrets.AZURE_STORAGE_KEY }}
+ --azure-container ${{ secrets.AZURE_CONTAINER_NAME }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} S3 ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ TieredStorage:
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, s3amazon, s3gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=tiered_storage
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ name: build_report_package_${{ inputs.arch }}
+ - name: Rename reports
+ run: |
+ mv ${{ env.REPORTS_PATH }}/build_report_*.json ${{ env.REPORTS_PATH }}/build_report_package_${{ inputs.arch }}.json
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-uri https://s3.${{ secrets.REGRESSION_AWS_S3_REGION }}.amazonaws.com/${{ secrets.REGRESSION_AWS_S3_BUCKET }}/data/
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --with-${{ matrix.STORAGE }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Tiered Storage ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index c404ea3ae4cc..385d0023b537 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -1,1650 +1,537 @@
-# generated by praktika
-
+# yamllint disable rule:comments-indentation
name: ReleaseBranchCI
-on:
- push:
- branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
- CHECKOUT_REF: ""
-
-# Allow updating GH commit statuses and PR comments to post an actual job reports link
-permissions: write-all
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+
+on: # yamllint disable-line rule:truthy
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
+ branches:
+ - 'releases/*'
+ push:
+ branches:
+ - 'releases/*'
+ tags:
+ - '*'
+ workflow_dispatch:
+ inputs:
+ workflow_name:
+ description: 'Name of the workflow'
+ required: false
+ type: string
jobs:
-
- config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: []
- name: "Config Workflow"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Config Workflow' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Config Workflow' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
- name: "Dockers Build (arm)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- dockers_build_amd_and_merge:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKSBhbmQgTWVyZ2U=') }}
- name: "Dockers Build (amd) and Merge"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (amd) and Merge' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
- name: "Build (amd_debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_debug)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_debug)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
- name: "Build (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
- name: "Build (amd_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
- name: "Build (amd_tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
- name: "Build (amd_msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_msan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_msan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
- name: "Build (amd_ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_ubsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
- name: "Build (amd_binary)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_binary)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_binary)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
- name: "Build (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker server image' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAocmVsZWFzZSk=') }}
- name: "Install packages (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd_and_merge, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYWFyY2g2NCk=') }}
- name: "Install packages (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (aarch64)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (aarch64)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDEvNCk=') }}
- name: "Integration tests (asan, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 1/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 1/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDIvNCk=') }}
- name: "Integration tests (asan, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 2/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 2/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDMvNCk=') }}
- name: "Integration tests (asan, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 3/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 3/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIDQvNCk=') }}
- name: "Integration tests (asan, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, 4/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, 4/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 1/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 2/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgMy82KQ==') }}
- name: "Integration tests (asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 3/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNC82KQ==') }}
- name: "Integration tests (asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 4/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNS82KQ==') }}
- name: "Integration tests (asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 5/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_asan_old_analyzer_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFzYW4sIG9sZCBhbmFseXplciwgNi82KQ==') }}
- name: "Integration tests (asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (asan, old analyzer, 6/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_1_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDEvNCk=') }}
- name: "Integration tests (release, 1/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 1/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_2_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDIvNCk=') }}
- name: "Integration tests (release, 2/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 2/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_3_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDMvNCk=') }}
- name: "Integration tests (release, 3/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 3/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_release_4_4:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHJlbGVhc2UsIDQvNCk=') }}
- name: "Integration tests (release, 4/4)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (release, 4/4)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_1_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDEvNik=') }}
- name: "Integration tests (tsan, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 1/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_2_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDIvNik=') }}
- name: "Integration tests (tsan, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 2/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_3_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDMvNik=') }}
- name: "Integration tests (tsan, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 3/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_4_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDQvNik=') }}
- name: "Integration tests (tsan, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 4/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_5_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDUvNik=') }}
- name: "Integration tests (tsan, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 5/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_tsan_6_6:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKHRzYW4sIDYvNik=') }}
- name: "Integration tests (tsan, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (tsan, 6/6)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_debug:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGRlYnVnKQ==') }}
- name: "Stress test (debug)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (debug)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (debug)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_tsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHRzYW4p') }}
- name: "Stress test (tsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_asan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFzYW4p') }}
- name: "Stress test (asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_ubsan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKHVic2FuKQ==') }}
- name: "Stress test (ubsan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (ubsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (ubsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_msan:
- runs-on: [self-hosted, func-tester]
- needs: [config_workflow, dockers_build_amd_and_merge, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKG1zYW4p') }}
- name: "Stress test (msan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (msan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (msan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
+ RunConfig:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ outputs:
+ data: ${{ steps.runconfig.outputs.CI_DATA }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true # to ensure correct digests
+ fetch-depth: 0 # to get version
+ filter: tree:0
+ - name: Debug Info
+ uses: ./.github/actions/debug
+ - name: PrepareRunConfig
+ id: runconfig
+ run: |
+ echo "::group::configure CI run"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+ echo "::group::CI run configure results"
+ python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
+ echo "::endgroup::"
+ {
+ echo 'CI_DATA<<CI_DATA_EOF'
+ cat ${{ runner.temp }}/ci_run_data.json
+ echo 'CI_DATA_EOF'
+ } >> "$GITHUB_OUTPUT"
+ - name: Re-create GH statuses for skipped jobs if any
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PR_NUMBER/$COMMIT_SHA/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
+ BuildDockers:
+ needs: [RunConfig]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/docker_test_images.yml
+ secrets: inherit
+ with:
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckX86:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Compatibility check (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ CompatibilityCheckAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Compatibility check (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+#########################################################################################
+#################################### ORDINARY BUILDS ####################################
+#########################################################################################
+ BuilderDebRelease:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_release
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ # always rebuild on release branches to be able to publish from any commit
+ force: true
+ BuilderDebAarch64:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_aarch64
+ checkout_depth: 0
+ data: ${{ needs.RunConfig.outputs.data }}
+ # always rebuild on release branches to be able to publish from any commit
+ force: true
+ runner_type: builder-aarch64
+ BuilderDebAsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_asan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebUBsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_ubsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebTsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_tsan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebMsan:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_msan
+ data: ${{ needs.RunConfig.outputs.data }}
+ BuilderDebDebug:
+ needs: [RunConfig, BuildDockers]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_build.yml
+ secrets: inherit
+ with:
+ build_name: package_debug
+ data: ${{ needs.RunConfig.outputs.data }}
+ force: true
+############################################################################################
+##################################### Docker images #######################################
+############################################################################################
+ DockerServerImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Docker server image
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ DockerKeeperImage:
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Docker keeper image
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ GrypeScan:
+ needs: [RunConfig, DockerServerImage, DockerKeeperImage]
+ if: ${{ !failure() && !cancelled() }}
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - image: server
+ suffix: ''
+ - image: server
+ suffix: '-alpine'
+ - image: keeper
+ suffix: ''
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-${{ matrix.image }}
+ version: ${{ fromJson(needs.RunConfig.outputs.data).version }}
+ tag-suffix: ${{ matrix.suffix }}
+############################################################################################
+##################################### BUILD REPORTER #######################################
+############################################################################################
+ Builds_Report:
+ # run report check for failed builds to indicate the CI error
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64, BuilderDebAsan, BuilderDebUBsan, BuilderDebMsan, BuilderDebTsan, BuilderDebDebug]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Download reports
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --pre --job-name Builds
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_msan package_ubsan package_tsan package_debug
+ - name: Set status
+ # NOTE(vnemkov): generate and upload the report even if previous step failed
+ if: success() || failure()
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(needs.RunConfig.outputs.data) }} --post --job-name Builds
+ MarkReleaseReady:
+ if: ${{ !failure() && !cancelled() }}
+ needs:
+ - BuilderDebRelease
+ - BuilderDebAarch64
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Debug
+ run: |
+ echo need with different filters
+ cat << 'EOF'
+ ${{ toJSON(needs) }}
+ ${{ toJSON(needs.*.result) }}
+ no failures ${{ !contains(needs.*.result, 'failure') }}
+ no skips ${{ !contains(needs.*.result, 'skipped') }}
+ no both ${{ !(contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ EOF
+ - name: Not ready
+ # fail the job to be able restart it
+ if: ${{ contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure') }}
+ run: exit 1
+ - name: Check out repository code
+ if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ - name: Mark Commit Release Ready
+ if: ${{ ! (contains(needs.*.result, 'skipped') || contains(needs.*.result, 'failure')) }}
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 mark_release_ready.py
+############################################################################################
+#################################### INSTALL PACKAGES ######################################
+############################################################################################
+ InstallPackagesTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Install packages (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+ InstallPackagesTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Install packages (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ run_command: |
+ python3 install_check.py "$CHECK_NAME"
+##############################################################################################
+########################### FUNCTIONAL STATELESS TESTS #######################################
+##############################################################################################
+ FunctionalStatelessTestRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (aarch64)
+ runner_type: altinity-func-tester-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestMsan:
+ needs: [RunConfig, BuilderDebMsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (msan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestUBsan:
+ needs: [RunConfig, BuilderDebUBsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (ubsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ FunctionalStatelessTestDebug:
+ needs: [RunConfig, BuilderDebDebug]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stateless tests (debug)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+##############################################################################################
+######################################### STRESS TESTS #######################################
+##############################################################################################
+ StressTestAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestMsan:
+ needs: [RunConfig, BuilderDebMsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (msan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestUBsan:
+ needs: [RunConfig, BuilderDebUBsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (ubsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ StressTestDebug:
+ needs: [RunConfig, BuilderDebDebug]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Stress test (debug)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+############################# INTEGRATION TESTS #############################################
+#############################################################################################
+ IntegrationTestsAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (asan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsAnalyzerAsan:
+ needs: [RunConfig, BuilderDebAsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (asan, old analyzer)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsTsan:
+ needs: [RunConfig, BuilderDebTsan]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (tsan)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+ IntegrationTestsRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_test.yml
+ secrets: inherit
+ with:
+ test_name: Integration tests (release)
+ runner_type: altinity-func-tester
+ data: ${{ needs.RunConfig.outputs.data }}
+#############################################################################################
+##################################### REGRESSION TESTS ######################################
+#############################################################################################
+ RegressionTestsRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'regression')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester
+ commit: 4a249bc0422d93c6e466edbe5af74fcb0f564820
+ arch: release
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ RegressionTestsAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'regression') && !contains(fromJson(needs.RunConfig.outputs.data).ci_settings.exclude_keywords, 'aarch64')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester-aarch64
+ commit: 4a249bc0422d93c6e466edbe5af74fcb0f564820
+ arch: aarch64
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ SignRelease:
+ needs: [RunConfig, BuilderDebRelease]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign release
+ runner_type: altinity-style-checker
+ data: ${{ needs.RunConfig.outputs.data }}
+ SignAarch64:
+ needs: [RunConfig, BuilderDebAarch64]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign aarch64
+ runner_type: altinity-style-checker-aarch64
+ data: ${{ needs.RunConfig.outputs.data }}
+ FinishCheck:
+ if: ${{ !cancelled() }}
+ needs:
+ - RunConfig
+ - DockerServerImage
+ - DockerKeeperImage
+ - Builds_Report
+ - MarkReleaseReady
+ - FunctionalStatelessTestDebug
+ - FunctionalStatelessTestRelease
+ - FunctionalStatelessTestAarch64
+ - FunctionalStatelessTestAsan
+ - FunctionalStatelessTestTsan
+ - FunctionalStatelessTestMsan
+ - FunctionalStatelessTestUBsan
+ - StressTestDebug
+ - StressTestAsan
+ - StressTestTsan
+ - StressTestMsan
+ - StressTestUBsan
+ - IntegrationTestsAsan
+ - IntegrationTestsTsan
+ - IntegrationTestsRelease
+ - CompatibilityCheckX86
+ - CompatibilityCheckAarch64
+ - RegressionTestsRelease
+ - RegressionTestsAarch64
+ - GrypeScan
+ - SignRelease
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ - name: Finish label
+ if: ${{ !failure() }}
+ run: |
+ # update overall ci report
+ python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ - name: Check Workflow results
+ if: ${{ !cancelled() }}
+ run: |
+ export WORKFLOW_RESULT_FILE="/tmp/workflow_results.json"
+ cat > "$WORKFLOW_RESULT_FILE" << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ python3 ./tests/ci/ci_buddy.py --check-wf-status
+ - name: Finalize workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ with:
+ final: true
diff --git a/.github/workflows/repo-sanity-checks.yml b/.github/workflows/repo-sanity-checks.yml
new file mode 100644
index 000000000000..ec50a056b730
--- /dev/null
+++ b/.github/workflows/repo-sanity-checks.yml
@@ -0,0 +1,150 @@
+name: Repository Sanity Checks
+
+on:
+ workflow_dispatch: # Manual trigger only
+
+ workflow_call:
+
+jobs:
+ sanity-checks:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ strategy:
+ fail-fast: false # Continue with other combinations if one fails
+ matrix:
+ include:
+ # Production packages
+ - env: prod
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/apt-repo
+ - env: prod
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/yum-repo
+ # FIPS Production packages
+ - env: prod-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/fips-apt-repo
+ - env: prod-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/fips-yum-repo
+ # Staging packages
+ - env: staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/apt-repo
+ - env: staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/yum-repo
+ # FIPS Staging packages
+ - env: staging-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/fips-apt-repo
+ - env: staging-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/fips-yum-repo
+ # Hotfix packages
+ - env: hotfix
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/hotfix-apt-repo
+ - env: hotfix
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental packages
+ - env: antalya
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/antalya-apt-repo
+ - env: antalya
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/antalya-yum-repo
+ # Hotfix staging packages
+ - env: hotfix-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/hotfix-apt-repo
+ - env: hotfix-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental staging packages
+ - env: antalya-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/antalya-apt-repo
+ - env: antalya-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/antalya-yum-repo
+
+ steps:
+ - name: Run sanity check
+ run: |
+ cat << 'EOF' > sanity.sh
+ #!/bin/bash
+ set -e -x
+
+ # Package installation commands based on type
+ if [ "${{ matrix.type }}" = "deb" ]; then
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 dialog sudo
+ mkdir -p /usr/share/keyrings
+ curl -s "${REPO_URL}/pubkey.gpg" | gpg --dearmor > /usr/share/keyrings/altinity-archive-keyring.gpg
+ echo "deb [signed-by=/usr/share/keyrings/altinity-archive-keyring.gpg] ${REPO_URL} stable main" > /etc/apt/sources.list.d/altinity.list
+ apt-get update
+ apt-get install -y clickhouse-server clickhouse-client
+ else
+ sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+ yum install -y curl gnupg2 sudo
+ if [[ "${{ matrix.env }}" == *"staging"* ]]; then
+ curl "${REPO_URL}/altinity-staging.repo" -o /etc/yum.repos.d/altinity-staging.repo
+ else
+ curl "${REPO_URL}/altinity.repo" -o /etc/yum.repos.d/altinity.repo
+ fi
+ yum install -y clickhouse-server clickhouse-client
+ fi
+
+ # Ensure correct ownership
+ chown -R clickhouse /var/lib/clickhouse/
+ chown -R clickhouse /var/log/clickhouse-server/
+
+ # Check server version
+ server_version=$(clickhouse-server --version)
+ echo "$server_version" | grep "altinity" || FAILED_SERVER=true
+
+ # Start server and test
+ sudo -u clickhouse clickhouse-server --config-file /etc/clickhouse-server/config.xml --daemon
+ sleep 10
+ clickhouse-client -q 'SELECT 1'
+
+ # Check client version
+ client_version=$(clickhouse-client --version)
+ echo "$client_version" | grep "altinity" || FAILED_CLIENT=true
+
+ # Report results
+ if [ "$FAILED_SERVER" = true ]; then
+ echo "::error::Server check failed - Version: $server_version"
+ exit 1
+ elif [ "$FAILED_CLIENT" = true ]; then
+ echo "::error::Client check failed - Version: $client_version"
+ exit 1
+ else
+ echo "All checks passed successfully!"
+ fi
+ EOF
+
+ chmod +x sanity.sh
+ docker run --rm \
+          -v "$(pwd)/sanity.sh:/sanity.sh" \
+ -e REPO_URL="${{ matrix.repo_url }}" \
+ ${{ matrix.base }} \
+ /sanity.sh
diff --git a/.github/workflows/reusable_build.yml b/.github/workflows/reusable_build.yml
index 0256ea4bde0d..fd2e44fc16ca 100644
--- a/.github/workflows/reusable_build.yml
+++ b/.github/workflows/reusable_build.yml
@@ -1,9 +1,14 @@
-### For the pure soul wishes to move it to another place
-# https://github.com/orgs/community/discussions/9050
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
name: Build ClickHouse
'on':
@@ -34,12 +39,15 @@ name: Build ClickHouse
description: additional ENV variables to setup the job
type: string
secrets:
- robot_git_token:
- required: false
- ci_db_url:
- required: false
- ci_db_password:
+ secret_envs:
+ description: if given, it's passed to the environments
required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
jobs:
Build:
@@ -47,10 +55,10 @@ jobs:
if: ${{ contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name) || inputs.force }}
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
- runs-on: [self-hosted, '${{inputs.runner_type}}']
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
@@ -61,18 +69,10 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
DOCKER_TAG<> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
@@ -90,6 +90,11 @@ jobs:
uses: ./.github/actions/common_setup
with:
job_type: build_check
+ - name: Create source tar
+ run: |
+ mkdir -p "$TEMP_PATH/build_check/package_release"
+ cd .. && tar czf $TEMP_PATH/build_source.src.tar.gz ClickHouse/
+ cd $TEMP_PATH && tar xvzf $TEMP_PATH/build_source.src.tar.gz
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
@@ -109,6 +114,11 @@ jobs:
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
+ - name: Upload json report
+ uses: actions/upload-artifact@v4
+ with:
+ path: ${{ env.TEMP_PATH }}/build_report_*.json
+ name: build_report_${{inputs.build_name}}
- name: Clean
if: always()
uses: ./.github/actions/clean
diff --git a/.github/workflows/reusable_sign.yml b/.github/workflows/reusable_sign.yml
new file mode 100644
index 000000000000..7bfed2758359
--- /dev/null
+++ b/.github/workflows/reusable_sign.yml
@@ -0,0 +1,166 @@
+name: Signing workflow
+'on':
+ workflow_call:
+ inputs:
+ test_name:
+ description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
+ required: true
+ type: string
+ runner_type:
+ description: the label of runner to use
+ required: true
+ type: string
+ run_command:
+ description: the command to launch the check
+ default: ""
+ required: false
+ type: string
+ checkout_depth:
+ description: the value of the git shallow checkout
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: if the submodules should be checked out
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+ description: additional ENV variables to setup the job
+ type: string
+ data:
+ description: ci data
+ type: string
+ required: true
+ working-directory:
+ description: sets custom working directory
+ type: string
+ default: "$GITHUB_WORKSPACE/tests/ci"
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ GPG_BINARY_SIGNING_KEY:
+ description: gpg signing key for packages.
+ required: true
+ GPG_BINARY_SIGNING_PASSPHRASE:
+ description: gpg signing key passphrase.
+ required: true
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ CHECK_NAME: ${{inputs.test_name}}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, altinity-on-demand, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Test:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ env:
+ GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ strategy:
+ fail-fast: false # we always wait for entire matrix
+ matrix:
+ batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(inputs.data).git_ref }}
+ submodules: ${{inputs.submodules}}
+ fetch-depth: ${{inputs.checkout_depth}}
+ filter: tree:0
+ - name: Set build envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ CHECK_NAME=${{ inputs.test_name }}
+ ${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
+ DOCKER_TAG< 1 }}
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ RUN_BY_HASH_NUM=${{matrix.batch}}
+ RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
+ EOF
+ - name: Pre run
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
+ - name: Sign release
+ env:
+ GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }}
+ GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }}
+ run: |
+ cd "${{ inputs.working-directory }}"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
+ --infile ${{ toJson(inputs.data) }} \
+ --job-name '${{inputs.test_name}}' \
+ --run \
+ --force \
+ --run-command '''python3 sign_release.py'''
+ - name: Post run
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
+ - name: Mark as done
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
+ - name: Upload signed hashes
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{inputs.test_name}} signed-hashes
+ path: ${{ env.TEMP_PATH }}/*.gpg
+ - name: Clean
+ if: always()
+ uses: ./.github/actions/clean
diff --git a/.github/workflows/reusable_simple_job.yml b/.github/workflows/reusable_simple_job.yml
index 247569c4f527..c13b6c88027e 100644
--- a/.github/workflows/reusable_simple_job.yml
+++ b/.github/workflows/reusable_simple_job.yml
@@ -66,7 +66,7 @@ jobs:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+        uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml
index e896239a5c30..5ff9ced10662 100644
--- a/.github/workflows/reusable_test.yml
+++ b/.github/workflows/reusable_test.yml
@@ -40,23 +40,79 @@ name: Testing workflow
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
- robot_git_token:
+ secret_envs:
+ description: if given, it's passed to the environments
required: false
- ci_db_url:
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws s3 bucket.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws s3 bucket.
+ required: true
+ CLICKHOUSE_TEST_STAT_LOGIN:
+ description: username for ci db.
+ required: true
+ CLICKHOUSE_TEST_STAT_PASSWORD:
+ description: password for ci db.
+ required: true
+ CLICKHOUSE_TEST_STAT_URL:
+ description: url for ci db.
+ required: true
+ DOCKER_PASSWORD:
+ description: token to upload docker images.
+ required: true
+ ROBOT_TOKEN:
+ description: token to update ci status.
+ required: true
+ AZURE_ACCOUNT_NAME:
+ description: Azure storage account name
required: false
- ci_db_password:
+ AZURE_STORAGE_KEY:
+ description: Azure storage access key
+ required: false
+ AZURE_CONTAINER_NAME:
+ description: Azure container name
required: false
-
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, altinity-on-demand, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
Test:
- runs-on: [self-hosted, '${{inputs.runner_type}}']
- if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
@@ -66,7 +122,7 @@ jobs:
batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
@@ -78,23 +134,28 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
DOCKER_TAG<> "$GITHUB_ENV"
+ echo "AZURE_STORAGE_KEY=${{ secrets.AZURE_STORAGE_KEY }}" >> "$GITHUB_ENV"
+ echo "AZURE_CONTAINER_NAME=${{ secrets.AZURE_CONTAINER_NAME }}" >> "$GITHUB_ENV"
+ echo "AZURE_STORAGE_ACCOUNT_URL=https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/" >> "$GITHUB_ENV"
+ echo "Configured Azure credentials"
+ fi
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: ${{ inputs.test_name }}
- name: Setup batch
if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
run: |
@@ -112,6 +173,7 @@ jobs:
--infile ${{ toJson(inputs.data) }} \
--job-name '${{inputs.test_name}}' \
--run \
+ --force \
--run-command '''${{inputs.run_command}}'''
# shellcheck disable=SC2319
echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
@@ -123,6 +185,16 @@ jobs:
if: ${{ !cancelled() }}
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
+ - name: Update workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ with:
+ final: false
- name: Clean
if: always()
uses: ./.github/actions/clean
diff --git a/.github/workflows/scheduled_runs.yml b/.github/workflows/scheduled_runs.yml
new file mode 100644
index 000000000000..dae3d1e25d9b
--- /dev/null
+++ b/.github/workflows/scheduled_runs.yml
@@ -0,0 +1,55 @@
+name: Scheduled Altinity Stable Builds
+
+on:
+ schedule:
+ - cron: '0 0 * * 6' #Weekly run for stable versions
+ - cron: '0 0 * * *' #Daily run for antalya versions
+ # Make sure that any changes to this file is actually tested with PRs
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
+ paths:
+ - '**/scheduled_runs.yml'
+
+jobs:
+ DailyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - antalya
+ name: ${{ matrix.branch }}
+ if: github.event.schedule != '0 0 * * 6'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
+
+ WeeklyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - customizations/24.8.11
+ name: ${{ matrix.branch }}
+ if: github.event.schedule != '0 0 * * *'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
diff --git a/.github/workflows/sign_and_release.yml b/.github/workflows/sign_and_release.yml
new file mode 100644
index 000000000000..c00dd4c9f43c
--- /dev/null
+++ b/.github/workflows/sign_and_release.yml
@@ -0,0 +1,426 @@
+name: Sign and Release packages
+
+on:
+ workflow_dispatch:
+ inputs:
+ workflow_url:
+ description: 'The URL to the workflow run that produced the packages'
+ required: true
+ release_environment:
+ description: 'The environment to release to. "staging" or "production"'
+ required: true
+ default: 'staging'
+ package_version:
+ description: 'The version of the package to release'
+ required: true
+ type: string
+ GPG_PASSPHRASE:
+ description: 'GPG passphrase for signing (required for production releases)'
+ required: false
+ type: string
+
+env:
+ AWS_REGION: us-east-1
+ S3_STORAGE_BUCKET: altinity-test-reports
+
+jobs:
+ extract-package-info:
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ steps:
+ - name: Download artifact "build_report_package_release"
+ run: gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n build_report_package_release
+
+ - name: Unzip Artifact
+ run: |
+ # Locate the downloaded zip file.
+ ZIP_FILE=$(ls | grep "build_report_package_release.*\.zip" | head -n 1)
+ if [ -z "$ZIP_FILE" ]; then
+ echo "No zip file found for the artifact."
+ exit 1
+ fi
+ echo "Found zip file: ${ZIP_FILE}"
+ unzip -o "$ZIP_FILE" -d artifact
+
+ - name: Extract JSON File
+ run: |
+ cd artifact
+ # Find the JSON file with a name like build_report...package_release.json
+ JSON_FILE=$(ls | grep "build_report.*package_release\.json" | head -n 1)
+ if [ -z "$JSON_FILE" ]; then
+ echo "No JSON file matching the pattern was found."
+ exit 1
+ fi
+ echo "Found JSON file: ${JSON_FILE}"
+ # NOTE(review): JSON_FILE, CLIENT_URL, BASH_REMATCH, PACKAGE_VERSION etc. are plain shell variables — they do NOT persist across steps (each run: is a fresh shell) nor across jobs (GITHUB_ENV is per-job). These extraction steps must be merged or export via $GITHUB_ENV / job outputs — verify before use.
+ - name: Parse JSON file
+ run: |
+ # Use jq to select the URL that ends with clickhouse-client-*-amd64.tgz
+ CLIENT_URL=$(jq -r '.build_urls[] | select(test("clickhouse-client-.*-amd64\\.tgz$"))' "$JSON_FILE")
+ if [ -z "$CLIENT_URL" ]; then
+ echo "No matching client URL found in JSON."
+ exit 1
+ fi
+ echo "Found client URL: ${CLIENT_URL}"
+
+ - name: Extract information
+ run: |
+ if ! [[ "$CLIENT_URL" =~ https://s3\.amazonaws\.com/([^/]+)/([^/]+)/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64\.tgz$ ]]; then
+ echo "The client URL did not match the expected pattern."
+ exit 1
+ fi
+
+ - name: Process information
+ run: |
+ SRC_BUCKET="${BASH_REMATCH[1]}"
+ PACKAGE_VERSION="${BASH_REMATCH[6]}"
+ FOLDER_TIME=$(date -u +"%Y-%m-%dT%H-%M-%S.%3N")
+
+ if [[ "${BASH_REMATCH[2]}" == "PRs" ]]; then
+ SRC_DIR="${BASH_REMATCH[2]}/${BASH_REMATCH[3]}"
+ COMMIT_HASH="${BASH_REMATCH[4]}"
+ PR_NUMBER="${BASH_REMATCH[3]}"
+ DOCKER_VERSION="${PR_NUMBER}"
+ TEST_RESULTS_SRC="${PR_NUMBER}"
+ else
+ SRC_DIR="${BASH_REMATCH[2]}"
+ COMMIT_HASH="${BASH_REMATCH[3]}"
+ DOCKER_VERSION="0"
+ TEST_RESULTS_SRC="0"
+ fi
+
+ - name: Verify package version
+ run: |
+ if [ "$PACKAGE_VERSION" != "${{ inputs.package_version }}" ]; then
+ echo "Error: Extracted package version ($PACKAGE_VERSION) does not match input package version (${{ inputs.package_version }})"
+ exit 1
+ fi
+
+ - name: Extract major version and determine if binary processing is needed
+ run: |
+ MAJOR_VERSION=$(echo "$PACKAGE_VERSION" | cut -d. -f1)
+ if [ "$MAJOR_VERSION" -ge 24 ]; then
+ NEEDS_BINARY_PROCESSING="true"
+ else
+ NEEDS_BINARY_PROCESSING="false"
+ fi
+
+ - name: Extract feature and set repo prefix
+ run: |
+ # Extract the feature from PACKAGE_VERSION (everything after the last dot)
+ ALTINITY_BUILD_FEATURE=$(echo "$PACKAGE_VERSION" | rev | cut -d. -f1 | rev)
+ echo "ALTINITY_BUILD_FEATURE=${ALTINITY_BUILD_FEATURE}" >> $GITHUB_ENV
+
+ # Set REPO_PREFIX based on the feature
+ case "$ALTINITY_BUILD_FEATURE" in
+ "altinityhotfix")
+ echo "REPO_PREFIX=hotfix-" >> $GITHUB_ENV
+ ;;
+ "altinityfips")
+ echo "REPO_PREFIX=fips-" >> $GITHUB_ENV
+ ;;
+ "altinityantalya")
+ echo "REPO_PREFIX=antalya-" >> $GITHUB_ENV
+ ;;
+ "altinitystable"|"altinitytest")
+ echo "REPO_PREFIX=" >> $GITHUB_ENV
+ ;;
+ *)
+ echo "Build feature not supported: ${ALTINITY_BUILD_FEATURE}"
+ exit 1
+ ;;
+ esac
+
+ - name: Check extracted information
+ run: |
+ echo "Extracted information:"
+ echo "altinity_build_feature: ${ALTINITY_BUILD_FEATURE}"
+ echo "commit_hash: ${COMMIT_HASH}"
+ echo "docker_version: ${DOCKER_VERSION}"
+ echo "folder_time: ${FOLDER_TIME}"
+ echo "major_version: ${MAJOR_VERSION}"
+ echo "needs_binary_processing: ${NEEDS_BINARY_PROCESSING}"
+ echo "package_version: ${PACKAGE_VERSION}"
+ echo "repo_prefix: ${REPO_PREFIX}"
+ echo "src_bucket: ${SRC_BUCKET}"
+ echo "src_dir: ${SRC_DIR}"
+ echo "test_results_src: ${TEST_RESULTS_SRC}"
+
+ - name: Set environment variables
+ run: |
+ # Set environment variables for use in subsequent jobs
+ echo "COMMIT_HASH=${COMMIT_HASH}" >> $GITHUB_ENV
+ echo "DOCKER_VERSION=${DOCKER_VERSION}" >> $GITHUB_ENV
+ echo "FOLDER_TIME=${FOLDER_TIME}" >> $GITHUB_ENV
+ echo "NEEDS_BINARY_PROCESSING=${NEEDS_BINARY_PROCESSING}" >> $GITHUB_ENV
+ echo "PACKAGE_VERSION=${PACKAGE_VERSION}" >> $GITHUB_ENV
+ echo "SRC_BUCKET=${SRC_BUCKET}" >> $GITHUB_ENV
+ echo "SRC_DIR=${SRC_DIR}" >> $GITHUB_ENV
+ echo "TEST_RESULTS_SRC=${TEST_RESULTS_SRC}" >> $GITHUB_ENV
+ echo "SRC_URL=s3://${SRC_BUCKET}/${SRC_DIR}/${COMMIT_HASH}" >> $GITHUB_ENV
+ echo "DEST_URL=s3://${S3_STORAGE_BUCKET}/builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" >> $GITHUB_ENV
+
+ copy-packages:
+ needs: extract-package-info
+ runs-on: [self-hosted, altinity-style-checker]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Download signed hash artifacts
+ run: |
+ # Download both signed hash artifacts
+ gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n "Sign release signed-hashes"
+ gh run download "$(echo "${{ inputs.workflow_url }}" | awk -F'/' '{print $NF}')" -n "Sign aarch64 signed-hashes"
+
+ # Unzip both artifacts
+ for zip in *signed-hashes*.zip; do
+ unzip -o "$zip" -d signed-hashes
+ done
+
+ - name: Copy ARM packages
+ run: |
+ if ! aws s3 sync "${SRC_URL}/package_aarch64/" "${DEST_URL}/packages/ARM_PACKAGES/"; then
+ echo "Failed to copy ARM packages"
+ exit 1
+ fi
+
+ - name: Verify ARM packages
+ run: |
+ cd signed-hashes  # NOTE(review): DEST_URL is an s3:// URI — the glob "../${DEST_URL}/..." below matches no local files, so this loop never verifies anything; download the objects locally first
+ for file in ../${DEST_URL}/packages/ARM_PACKAGES/**/*; do
+ if [ -f "$file" ]; then
+ echo "Verifying $file..."
+ if ! gpg --verify "Sign aarch64 signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "GPG verification failed for $file"
+ exit 1
+ fi
+ if ! sha256sum -c "Sign aarch64 signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "SHA256 verification failed for $file"
+ exit 1
+ fi
+ fi
+ done
+
+ - name: Separate ARM binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse-stripped"
+
+ - name: Copy AMD packages
+ run: |
+ if ! aws s3 sync "${SRC_URL}/package_release/" "${DEST_URL}/packages/AMD_PACKAGES/"; then
+ echo "Failed to copy AMD packages"
+ exit 1
+ fi
+
+ - name: Verify AMD packages
+ run: |
+ cd signed-hashes
+ for file in ../${DEST_URL}/packages/AMD_PACKAGES/**/*; do
+ if [ -f "$file" ]; then
+ echo "Verifying $file..."
+ if ! gpg --verify "Sign release signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "GPG verification failed for $file"
+ exit 1
+ fi
+ if ! sha256sum -c "Sign release signed-hashes/$(basename "$file").sha256.gpg" 2>/dev/null; then
+ echo "SHA256 verification failed for $file"
+ exit 1
+ fi
+ fi
+ done
+
+ - name: Separate AMD binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse-stripped"
+
+ - name: Process ARM binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ run: |
+ echo "Downloading clickhouse binary..."
+ if ! aws s3 cp "${SRC_URL}/package_release/clickhouse" clickhouse; then
+ echo "Failed to download clickhouse binary"
+ exit 1
+ fi
+ chmod +x clickhouse
+
+ echo "Running clickhouse binary..."
+ ./clickhouse -q'q'
+
+ echo "Stripping the binary..."
+ strip clickhouse -o clickhouse-stripped
+
+ echo "Uploading processed binaries..."
+ if ! aws s3 cp clickhouse "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload clickhouse binary"
+ exit 1
+ fi
+ if ! aws s3 cp clickhouse-stripped "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload stripped clickhouse binary"
+ exit 1
+ fi
+
+ copy-test-results:
+ needs: extract-package-info
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Copy test results to S3
+ run: |
+ # Copy test results
+ echo "Copying test results..."
+ if ! aws s3 sync "s3://${SRC_BUCKET}/${TEST_RESULTS_SRC}/${COMMIT_HASH}" \
+ "${DEST_URL}/test_results/"; then
+ echo "Failed to copy test results"
+ exit 1
+ fi
+
+ publish-docker:
+ needs: extract-package-info
+ strategy:
+ matrix:
+ image_type: [server, keeper]
+ variant: ['', '-alpine']
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Publish Docker Image
+ id: publish
+ uses: ./.github/workflows/docker_publish.yml # NOTE(review): a step cannot call a reusable workflow (and secrets: inherit below is job-level only); this must become a job-level uses: call
+ with:
+ docker_image: altinityinfra/clickhouse-${{ matrix.image_type }}:${{ env.DOCKER_VERSION }}-${{ env.PACKAGE_VERSION }}${{ matrix.variant }}
+ release_environment: ${{ inputs.release_environment }}
+ upload_artifacts: false
+ secrets: inherit
+
+ - name: Upload Docker images to S3
+ run: |
+ # Upload Docker images to S3
+ echo "Uploading Docker images to S3..."
+ if ! aws s3 sync "${{ steps.publish.outputs.image_archives_path }}/" \
+ "${DEST_URL}/docker_images/${{ matrix.image_type }}${{ matrix.variant }}/"; then
+ echo "Failed to upload Docker images"
+ exit 1
+ fi
+
+ sign-and-publish:
+ needs: [copy-packages]
+ runs-on: arc-runners-clickhouse-signer
+ env:
+ GPG_PASSPHRASE: ${{ inputs.release_environment == 'production' && inputs.GPG_PASSPHRASE || secrets.GPG_PASSPHRASE }}
+ REPO_DNS_NAME: ${{ inputs.release_environment == 'production' && 'builds.altinity.cloud' || 'builds.staging.altinity.cloud' }}
+ REPO_NAME: ${{ inputs.release_environment == 'production' && 'altinity' || 'altinity-staging' }}
+ REPO_SUBTITLE: ${{ inputs.release_environment == 'production' && 'Stable Builds' || 'Staging Builds' }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/ClickHouse
+ ref: antalya
+ path: ClickHouse
+
+ - name: Download packages
+ run: |
+ if ! aws s3 cp "${DEST_URL}/packages/ARM_PACKAGES/" $RUNNER_TEMP/packages --recursive; then
+ echo "Failed to download ARM packages"
+ exit 1
+ fi
+ if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/" $RUNNER_TEMP/packages --recursive; then
+ echo "Failed to download AMD packages"
+ exit 1
+ fi
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+
+ - name: Process ARM binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ run: |
+ chmod +x $RUNNER_TEMP/packages/arm-bin/clickhouse
+
+ echo "Running clickhouse binary..."
+ $RUNNER_TEMP/packages/arm-bin/clickhouse -q'q'
+
+ echo "Stripping the binary..."
+ strip $RUNNER_TEMP/packages/arm-bin/non-self-extracting/clickhouse -o $RUNNER_TEMP/packages/arm-bin/non-self-extracting/clickhouse-stripped
+
+ - name: Setup GPG
+ run: |
+ if [ -z "${GPG_PASSPHRASE}" ]
+ then
+ echo "GPG_PASSPHRASE is not set"
+ exit 1
+ fi
+
+ - name: Process GPG key
+ run: |
+ echo "Processing GPG key..."
+ if ! aws secretsmanager get-secret-value --secret-id arn:aws:secretsmanager:us-east-1:446527654354:secret:altinity_staging_gpg-Rqbe8S --query SecretString --output text | sed -e "s/^'//" -e "s/'$//" | jq -r '.altinity_staging_gpg | @base64d' | gpg --batch --import; then
+ echo "Failed to import GPG key"
+ exit 1
+ fi
+ gpg --list-secret-keys --with-keygrip
+ gpgconf --kill gpg-agent
+ gpg-agent --daemon --allow-preset-passphrase
+ if ! aws ssm get-parameter --name /gitlab-runner/key-encrypting-key --with-decryption --query Parameter.Value --output text | sudo tee /root/.key-encrypting-key >/dev/null; then
+ echo "Failed to get key encrypting key"
+ exit 1
+ fi
+ GPG_KEY_NAME=$(gpg --list-secret-keys | grep uid | head --lines 1 | tr -s " " | cut -d " " -f 4-)
+ GPG_KEY_ID=$(gpg --list-secret-keys --with-keygrip "${GPG_KEY_NAME}" | grep Keygrip | head --lines 1 | tr -s " " | cut -d " " -f 4)
+ echo "$GPG_PASSPHRASE" | base64 -d | sudo openssl enc -d -aes-256-cbc -pbkdf2 -pass file:/root/.key-encrypting-key -in - -out - | /usr/lib/gnupg/gpg-preset-passphrase --preset $GPG_KEY_ID
+
+ - name: Run Ansible playbook
+ run: |
+ echo "Running Ansible playbook for signing and publishing..."
+ echo "ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml -e aws_region=$AWS_REGION -e gpg_key_id=\"$GPG_KEY_ID\" -e gpg_key_name=\"$GPG_KEY_NAME\"-e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" -e pkgver=\"${PACKAGE_VERSION}\" -e release_environment=$RELEASE_ENVIRONMENT -e repo_dns_name=$REPO_DNS_NAME -e repo_name=$REPO_NAME -e repo_prefix=\"$REPO_PREFIX\" -e repo_subtitle=\"$REPO_SUBTITLE\" -e s3_pkgs_bucket=$S3_STORAGE_BUCKET -e s3_pkgs_path=\"builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}\" -e repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml "
+ if ! ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml \
+ -e aws_region=$AWS_REGION \
+ -e gpg_key_id="$GPG_KEY_ID" \
+ -e gpg_key_name="$GPG_KEY_NAME" \
+ -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" \
+ -e pkgver="${PACKAGE_VERSION}" \
+ -e release_environment=$RELEASE_ENVIRONMENT \
+ -e repo_dns_name=$REPO_DNS_NAME \
+ -e repo_name=$REPO_NAME \
+ -e repo_prefix="$REPO_PREFIX" \
+ -e repo_subtitle="$REPO_SUBTITLE" \
+ -e s3_pkgs_bucket=$S3_STORAGE_BUCKET \
+ -e s3_pkgs_path="builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" \
+ ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml; then
+ echo "Ansible playbook failed"
+ exit 1
+ fi
+ gpgconf --kill gpg-agent
+ ls -hal
+
+ - name: Cleanup temporary files
+ if: always()
+ run: |
+ echo "Cleaning up temporary files..."
+ rm -f $RUNNER_TEMP/clickhouse* || true
+
+ repo-sanity-check:
+ needs: sign-and-publish
+ uses: Altinity/ClickHouse/.github/workflows/repo-sanity-checks.yml@antalya
+
+ copy-to-released:
+ needs: [sign-and-publish]
+ if: ${{ inputs.release_environment == 'production' }}
+ runs-on: [self-hosted, altinity-style-checker-aarch64]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ steps:
+ - name: Copy to released directory
+ run: |
+ echo "Copying to released directory..."
+ if ! aws s3 sync "${DEST_URL}/" "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/"; then
+ echo "Failed to copy to released directory"
+ exit 1
+ fi
diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index 9a770b0aa1a9..7b080952ac7b 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -7,6 +7,10 @@ SET(VERSION_MAJOR 25)
SET(VERSION_MINOR 3)
SET(VERSION_PATCH 7)
SET(VERSION_GITHASH 39f0a39e4bba45eea18c12814607719245ba3153)
-SET(VERSION_DESCRIBE v25.3.7.1-lts)
-SET(VERSION_STRING 25.3.7.1)
+#10000 for altinitystable candidates
+#20000 for altinityedge candidates
+SET(VERSION_TWEAK 10000)
+SET(VERSION_FLAVOUR altinitytest)
+SET(VERSION_DESCRIBE v25.3.7.10000.altinitytest)
+SET(VERSION_STRING 25.3.7.10000.altinitytest)
# end of autochange
diff --git a/cmake/version.cmake b/cmake/version.cmake
index 9ca21556f4d4..b008c989c0b0 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -3,9 +3,10 @@ include(${PROJECT_SOURCE_DIR}/cmake/autogenerated_versions.txt)
set(VERSION_EXTRA "" CACHE STRING "")
set(VERSION_TWEAK "" CACHE STRING "")
-if (VERSION_TWEAK)
- string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK})
-endif ()
+# NOTE(vnemkov): we rely on VERSION_TWEAK portion to be already present in VERSION_STRING
+# if (VERSION_TWEAK)
+# string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK})
+# endif ()
if (VERSION_EXTRA)
string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_EXTRA})
@@ -19,5 +20,5 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}")
math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000")
if(CLICKHOUSE_OFFICIAL_BUILD)
- set(VERSION_OFFICIAL " (official build)")
+ set(VERSION_OFFICIAL " (altinity build)")
endif()
diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt
index 9df9d3e00268..ea99a20c9731 100644
--- a/contrib/google-protobuf-cmake/CMakeLists.txt
+++ b/contrib/google-protobuf-cmake/CMakeLists.txt
@@ -369,6 +369,7 @@ else ()
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt
index 6dfa9a39583c..52cc8458caf5 100644
--- a/contrib/grpc-cmake/CMakeLists.txt
+++ b/contrib/grpc-cmake/CMakeLists.txt
@@ -78,6 +78,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
@@ -134,6 +135,7 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
execute_process(
COMMAND ${CMAKE_COMMAND}
"-G${CMAKE_GENERATOR}"
+ "-DCMAKE_POLICY_VERSION_MINIMUM=3.5"
"-DCMAKE_MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}"
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
diff --git a/contrib/openssl-cmake/CMakeLists.txt b/contrib/openssl-cmake/CMakeLists.txt
index 6f44a93b0300..5ffc6f2f5d2b 100644
--- a/contrib/openssl-cmake/CMakeLists.txt
+++ b/contrib/openssl-cmake/CMakeLists.txt
@@ -8,6 +8,9 @@ if(NOT ENABLE_SSL)
return()
endif()
+project(ch-openssl)
+cmake_minimum_required(VERSION 3.5)
+
# Below build description was generated from these steps:
# - Checkout OpenSSL in the desired version (e.g. 3.2)
# - Take a brief look (but not too long to save your mental sanity) at the supported build options (*)
diff --git a/contrib/sparse-checkout/update-aws.sh b/contrib/sparse-checkout/update-aws.sh
index 3b449e6729a3..19820bd8dcfa 100755
--- a/contrib/sparse-checkout/update-aws.sh
+++ b/contrib/sparse-checkout/update-aws.sh
@@ -8,6 +8,7 @@ echo '!/*/*' >> $FILES_TO_CHECKOUT
echo '/src/aws-cpp-sdk-core/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-s3/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-aws/*' >> $FILES_TO_CHECKOUT
+echo '/generated/src/aws-cpp-sdk-glue/*' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1
diff --git a/docker/docs/builder/Dockerfile b/docker/docs/builder/Dockerfile
index ad69ea1aec09..c7664d54eae9 100644
--- a/docker/docs/builder/Dockerfile
+++ b/docker/docs/builder/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/docs-builder .
+# docker build -t altinityinfra/docs-builder .
FROM golang:alpine AS htmltest-builder
ARG HTMLTEST_VERSION=0.17.0
diff --git a/docker/images.json b/docker/images.json
index 942cf9606c69..91d53ea2cb0d 100644
--- a/docker/images.json
+++ b/docker/images.json
@@ -1,122 +1,121 @@
{
"docker/packager/binary-builder": {
- "name": "clickhouse/binary-builder",
+ "name": "altinityinfra/binary-builder",
"dependent": []
},
"docker/packager/cctools": {
- "name": "clickhouse/cctools",
+ "name": "altinityinfra/cctools",
"dependent": []
},
"docker/test/compatibility/centos": {
- "name": "clickhouse/test-old-centos",
+ "name": "altinityinfra/test-old-centos",
"dependent": []
},
"docker/test/compatibility/ubuntu": {
- "name": "clickhouse/test-old-ubuntu",
+ "name": "altinityinfra/test-old-ubuntu",
"dependent": []
},
"docker/test/integration/base": {
- "name": "clickhouse/integration-test",
- "dependent": [
+ "only_amd64": true,
+ "name": "altinityinfra/integration-test",
+ "dependent": [
"docker/test/integration/clickhouse_with_unity_catalog"
]
},
"docker/test/fuzzer": {
- "name": "clickhouse/fuzzer",
+ "name": "altinityinfra/fuzzer",
"dependent": []
},
"docker/test/libfuzzer": {
- "name": "clickhouse/libfuzzer",
+ "name": "altinityinfra/libfuzzer",
"dependent": []
},
"docker/test/performance-comparison": {
- "name": "clickhouse/performance-comparison",
+ "name": "altinityinfra/performance-comparison",
"dependent": []
},
"docker/test/util": {
- "name": "clickhouse/test-util",
+ "name": "altinityinfra/test-util",
"dependent": [
"docker/test/base",
"docker/test/fasttest"
]
},
"docker/test/stateless": {
- "name": "clickhouse/stateless-test",
+ "name": "altinityinfra/stateless-test",
"dependent": [
"docker/test/stateful"
]
},
"docker/test/stateful": {
- "name": "clickhouse/stateful-test",
+ "name": "altinityinfra/stateful-test",
"dependent": [
"docker/test/stress"
]
},
"docker/test/unit": {
- "name": "clickhouse/unit-test",
+ "name": "altinityinfra/unit-test",
"dependent": []
},
"docker/test/stress": {
- "name": "clickhouse/stress-test",
+ "name": "altinityinfra/stress-test",
"dependent": []
},
"docker/test/integration/runner": {
- "name": "clickhouse/integration-tests-runner",
+ "only_amd64": true,
+ "name": "altinityinfra/integration-tests-runner",
"dependent": []
},
"docker/test/fasttest": {
- "name": "clickhouse/fasttest",
+ "name": "altinityinfra/fasttest",
"dependent": [
"docker/packager/binary-builder"
]
},
- "docker/test/style": {
- "name": "clickhouse/style-test",
- "dependent": []
- },
"docker/test/integration/s3_proxy": {
- "name": "clickhouse/s3-proxy",
+ "name": "altinityinfra/s3-proxy",
"dependent": []
},
"docker/test/integration/resolver": {
- "name": "clickhouse/python-bottle",
+ "name": "altinityinfra/python-bottle",
"dependent": []
},
"docker/test/integration/helper_container": {
- "name": "clickhouse/integration-helper",
+ "only_amd64": true,
+ "name": "altinityinfra/integration-helper",
"dependent": []
},
"docker/test/integration/mysql_golang_client": {
- "name": "clickhouse/mysql-golang-client",
+ "name": "altinityinfra/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
- "name": "clickhouse/dotnet-client",
+ "name": "altinityinfra/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
- "name": "clickhouse/mysql-java-client",
+ "name": "altinityinfra/mysql-java-client",
"dependent": []
},
"docker/test/integration/mysql_js_client": {
- "name": "clickhouse/mysql-js-client",
+ "name": "altinityinfra/mysql-js-client",
"dependent": []
},
"docker/test/integration/mysql_php_client": {
- "name": "clickhouse/mysql-php-client",
+ "name": "altinityinfra/mysql-php-client",
"dependent": []
},
"docker/test/integration/postgresql_java_client": {
- "name": "clickhouse/postgresql-java-client",
+ "name": "altinityinfra/postgresql-java-client",
"dependent": []
},
"docker/test/integration/kerberos_kdc": {
"only_amd64": true,
- "name": "clickhouse/kerberos-kdc",
+ "name": "altinityinfra/kerberos-kdc",
"dependent": []
},
"docker/test/base": {
- "name": "clickhouse/test-base",
+ "name": "altinityinfra/test-base",
"dependent": [
"docker/test/clickbench",
"docker/test/fuzzer",
@@ -131,47 +130,43 @@
]
},
"docker/test/sqlancer": {
- "name": "clickhouse/sqlancer-test",
+ "name": "altinityinfra/sqlancer-test",
"dependent": []
},
"docker/test/keeper-jepsen": {
- "name": "clickhouse/keeper-jepsen-test",
+ "name": "altinityinfra/keeper-jepsen-test",
"dependent": []
},
"docker/test/server-jepsen": {
- "name": "clickhouse/server-jepsen-test",
+ "name": "altinityinfra/server-jepsen-test",
"dependent": []
},
"docker/test/clickbench": {
- "name": "clickhouse/clickbench",
+ "name": "altinityinfra/clickbench",
"dependent": []
},
"docker/test/install/deb": {
- "name": "clickhouse/install-deb-test",
+ "name": "altinityinfra/install-deb-test",
"dependent": []
},
"docker/test/install/rpm": {
- "name": "clickhouse/install-rpm-test",
- "dependent": []
- },
- "docker/docs/builder": {
- "name": "clickhouse/docs-builder",
+ "name": "altinityinfra/install-rpm-test",
"dependent": []
},
"docker/test/sqllogic": {
- "name": "clickhouse/sqllogic-test",
+ "name": "altinityinfra/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
- "name": "clickhouse/sqltest",
+ "name": "altinityinfra/sqltest",
"dependent": []
},
"docker/test/integration/nginx_dav": {
- "name": "clickhouse/nginx-dav",
+ "name": "altinityinfra/nginx-dav",
"dependent": []
},
"docker/test/integration/clickhouse_with_unity_catalog": {
- "name": "clickhouse/integration-test-with-unity-catalog",
+ "name": "altinityinfra/integration-test-with-unity-catalog",
"dependent": []
}
}
diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile
index ad94703045ed..8f8cc46285b7 100644
--- a/docker/keeper/Dockerfile
+++ b/docker/keeper/Dockerfile
@@ -16,7 +16,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac
-FROM alpine
+FROM alpine:3.21
ENV LANG=en_US.UTF-8 \
LANGUAGE=en_US:en \
diff --git a/docker/packager/binary-builder/Dockerfile b/docker/packager/binary-builder/Dockerfile
index 648d8638aa2e..7e2aabeb7d5d 100644
--- a/docker/packager/binary-builder/Dockerfile
+++ b/docker/packager/binary-builder/Dockerfile
@@ -1,11 +1,13 @@
-# docker build -t clickhouse/binary-builder .
+# docker build -t altinityinfra/binary-builder .
ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG
+FROM altinityinfra/fasttest:$FROM_TAG
+# NOTE(strtgbb) Not sure where LLVM_VERSION is set, so we set it here
+ENV LLVM_VERSION=19
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
# If the cctools is updated, then first build it in the CI, then update here in a different commit
-COPY --from=clickhouse/cctools:d9e3596e706b /cctools /cctools
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /cctools /cctools
# Rust toolchain and libraries
ENV RUSTUP_HOME=/rust/rustup
diff --git a/docker/packager/binary-builder/build.sh b/docker/packager/binary-builder/build.sh
index dc1837b869ec..bb9a5ea20882 100755
--- a/docker/packager/binary-builder/build.sh
+++ b/docker/packager/binary-builder/build.sh
@@ -176,7 +176,8 @@ then
git -C "$PERF_OUTPUT"/ch reset --soft pr
git -C "$PERF_OUTPUT"/ch log -5
# Unlike git log, git show requires trees
- git -C "$PERF_OUTPUT"/ch show -s
+ # NOTE(strtgbb) the show command fails in our ci - fatal: unable to read tree ...
+ # git -C "$PERF_OUTPUT"/ch show -s
(
cd "$PERF_OUTPUT"/..
tar -cv --zstd -f /output/performance.tar.zst output
diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile
index 570a42d42d51..3555bf7c428b 100644
--- a/docker/packager/cctools/Dockerfile
+++ b/docker/packager/cctools/Dockerfile
@@ -1,10 +1,10 @@
-# docker build -t clickhouse/cctools .
+# docker build -t altinityinfra/cctools .
-# This is a hack to significantly reduce the build time of the clickhouse/binary-builder
+# This is a hack to significantly reduce the build time of the altinityinfra/binary-builder
# It's based on the assumption that we don't care of the cctools version so much
-# It event does not depend on the clickhouse/fasttest in the `docker/images.json`
+# It event does not depend on the altinityinfra/fasttest in the `docker/images.json`
ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG as builder
+FROM altinityinfra/fasttest:$FROM_TAG as builder
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
diff --git a/docker/packager/packager b/docker/packager/packager
index f720e6492f02..88bd6ae21d06 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -6,12 +6,13 @@ import os
import subprocess
import sys
from pathlib import Path
-from typing import List, Optional
+from typing import Dict, List, Optional
SCRIPT_PATH = Path(__file__).absolute()
IMAGE_TYPE = "binary-builder"
-IMAGE_NAME = f"clickhouse/{IMAGE_TYPE}"
-
+IMAGE_NAME = f"altinityinfra/{IMAGE_TYPE}"
+DEFAULT_TMP_PATH = SCRIPT_PATH.parent.absolute() / 'tmp'
+TEMP_PATH = Path(os.getenv("TEMP_PATH", DEFAULT_TMP_PATH))
class BuildException(Exception):
pass
@@ -68,9 +69,22 @@ def run_docker_image_with_env(
ch_root: Path,
cargo_cache_dir: Path,
ccache_dir: Optional[Path],
+ aws_secrets : Optional[Dict[str,str]]
) -> None:
output_dir.mkdir(parents=True, exist_ok=True)
cargo_cache_dir.mkdir(parents=True, exist_ok=True)
+ extra_parts = ""
+
+ if aws_secrets:
+ # Pass AWS credentials via file rather than via env to avoid leaking secrets
+ env_part = {"AWS_CONFIG_FILE": "/home/clickhouse/.aws/credentials"}
+ host_aws_config_file_path = Path(TEMP_PATH) / 'aws_config'
+ with open(host_aws_config_file_path, 'wt') as f:
+ f.write("[default]")
+ for key, value in aws_secrets.items():
+ f.write(f"\n{key}={value}")
+
+ extra_parts = f"--volume={host_aws_config_file_path}:{env_part['AWS_CONFIG_FILE']}"
env_part = " -e ".join(env_variables)
if env_part:
@@ -93,6 +107,7 @@ def run_docker_image_with_env(
cmd = (
f"docker run --network=host --user={user} --rm {ccache_mount} "
f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
+ f" {extra_parts} "
f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name} /build/docker/packager/binary-builder/build.sh"
)
@@ -435,6 +450,14 @@ def parse_args() -> argparse.Namespace:
type=dir_name,
help="a directory with ccache",
)
+ parser.add_argument(
+ "--s3-access-key-id",
+ help="an S3 access key id used for sscache bucket",
+ )
+ parser.add_argument(
+ "--s3-secret-access-key",
+ help="an S3 secret access key used for sscache bucket",
+ )
parser.add_argument(
"--s3-bucket",
help="an S3 bucket used for sscache and clang-tidy-cache",
@@ -541,6 +564,10 @@ def main() -> None:
ch_root,
args.cargo_cache_dir,
args.ccache_dir,
+ {
+ "aws_access_key_id" : args.s3_access_key_id,
+ "aws_secret_access_key" : args.s3_secret_access_key
+ }
)
logging.info("Output placed into %s", args.output_dir)
diff --git a/docker/server/README.md b/docker/server/README.md
index 61e86b8a9b53..68e81956a700 100644
--- a/docker/server/README.md
+++ b/docker/server/README.md
@@ -193,4 +193,4 @@ EOSQL
## License
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/server/README.sh b/docker/server/README.sh
index 42fa72404d1f..0441d1e7f633 100755
--- a/docker/server/README.sh
+++ b/docker/server/README.sh
@@ -34,5 +34,5 @@ EOD
# Remove %%LOGO%% from the file with one line below
sed -i '/^%%LOGO%%/,+1d' "$R"
-# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
-sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' $R
+# Replace each %%IMAGE%% with our `altinity/clickhouse-server`
+sed -i '/%%IMAGE%%/s:%%IMAGE%%:altinity/clickhouse-server:g' $R
diff --git a/docker/server/README.src/github-repo b/docker/server/README.src/github-repo
index 70a009ec9588..721b5d7bc3f8 100644
--- a/docker/server/README.src/github-repo
+++ b/docker/server/README.src/github-repo
@@ -1 +1 @@
-https://github.com/ClickHouse/ClickHouse
+https://github.com/Altinity/ClickHouse/
diff --git a/docker/server/README.src/license.md b/docker/server/README.src/license.md
index 6be024edcdec..fac387a11c5b 100644
--- a/docker/server/README.src/license.md
+++ b/docker/server/README.src/license.md
@@ -1 +1 @@
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/server/README.src/logo.svg b/docker/server/README.src/logo.svg
index a50dd81a1645..886f4f0e4ddd 100644
--- a/docker/server/README.src/logo.svg
+++ b/docker/server/README.src/logo.svg
@@ -1,43 +1,17 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
+
+
+
+
+
+
+
+
diff --git a/docker/server/README.src/maintainer.md b/docker/server/README.src/maintainer.md
index 26c7db1a2934..90f15bb5337c 100644
--- a/docker/server/README.src/maintainer.md
+++ b/docker/server/README.src/maintainer.md
@@ -1 +1 @@
-[ClickHouse Inc.](%%GITHUB-REPO%%)
+[Altinity Inc.](%%GITHUB-REPO%%)
diff --git a/docker/test/README.md b/docker/test/README.md
index 563cfd837e95..baca52cd1149 100644
--- a/docker/test/README.md
+++ b/docker/test/README.md
@@ -2,4 +2,4 @@
## License
-View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
+View [license information](https://github.com/Altinity/ClickHouse/blob/antalya/LICENSE) for the software contained in this image.
diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile
index 2e9af0a4a2d4..26cd016a5abc 100644
--- a/docker/test/base/Dockerfile
+++ b/docker/test/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/test-base .
+# docker build -t altinityinfra/test-base .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
RUN apt-get update \
&& apt-get install \
diff --git a/docker/test/clickbench/Dockerfile b/docker/test/clickbench/Dockerfile
index 0b6b1736e031..214191a8b488 100644
--- a/docker/test/clickbench/Dockerfile
+++ b/docker/test/clickbench/Dockerfile
@@ -1,5 +1,5 @@
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
diff --git a/docker/test/compatibility/centos/Dockerfile b/docker/test/compatibility/centos/Dockerfile
index 628609e374f6..1edb42422b1f 100644
--- a/docker/test/compatibility/centos/Dockerfile
+++ b/docker/test/compatibility/centos/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-centos .
+# docker build -t altinityinfra/test-old-centos .
FROM centos:5
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/docker/test/compatibility/ubuntu/Dockerfile b/docker/test/compatibility/ubuntu/Dockerfile
index ddd0a76bd446..0eb283ff3daf 100644
--- a/docker/test/compatibility/ubuntu/Dockerfile
+++ b/docker/test/compatibility/ubuntu/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-ubuntu .
+# docker build -t altinityinfra/test-old-ubuntu .
FROM ubuntu:12.04
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile
index 264eb7bee326..c0a9c9b30bac 100644
--- a/docker/test/fasttest/Dockerfile
+++ b/docker/test/fasttest/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/fasttest .
+# docker build -t altinityinfra/fasttest .
ARG FROM_TAG=latest
-FROM clickhouse/test-util:$FROM_TAG
+FROM altinityinfra/test-util:$FROM_TAG
RUN apt-get update \
&& apt-get install \
@@ -66,6 +66,7 @@ RUN mkdir /tmp/ccache \
-DCMAKE_BUILD_TYPE=None \
-DZSTD_FROM_INTERNET=ON \
-DREDIS_STORAGE_BACKEND=OFF \
+ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \
-Wno-dev \
-B build \
-S . \
diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile
index e1fb09b8ed57..f79fa706e72e 100644
--- a/docker/test/fuzzer/Dockerfile
+++ b/docker/test/fuzzer/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/fuzzer .
+# docker build -t altinityinfra/fuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -41,5 +41,5 @@ CMD set -o pipefail \
&& cd /workspace \
&& timeout -s 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
-# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer
+# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/fuzzer
diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile
index dc4d470a2623..92d4ef9ec9c0 100644
--- a/docker/test/integration/base/Dockerfile
+++ b/docker/test/integration/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/integration-test .
+# docker build -t altinityinfra/integration-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
SHELL ["/bin/bash", "-c"]
@@ -73,5 +73,5 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
diff --git a/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile b/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
index f711d7258a9e..0dcaf2df80b9 100644
--- a/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
+++ b/docker/test/integration/clickhouse_with_unity_catalog/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/integration-test-with-unity-catalog .
+# docker build -t altinityinfra/integration-test-with-unity-catalog .
ARG FROM_TAG=latest
-FROM clickhouse/integration-test:$FROM_TAG
+FROM altinityinfra/integration-test:$FROM_TAG
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get -y install openjdk-17-jdk-headless && update-alternatives --config java && update-alternatives --config javac
diff --git a/docker/test/integration/helper_container/Dockerfile b/docker/test/integration/helper_container/Dockerfile
index 1084d087e53b..81d658705836 100644
--- a/docker/test/integration/helper_container/Dockerfile
+++ b/docker/test/integration/helper_container/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-helper .
+# docker build -t altinityinfra/integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine:3.18
diff --git a/docker/test/integration/kerberos_kdc/Dockerfile b/docker/test/integration/kerberos_kdc/Dockerfile
index a203c33a3313..a7f989bf4a56 100644
--- a/docker/test/integration/kerberos_kdc/Dockerfile
+++ b/docker/test/integration/kerberos_kdc/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/kerberos-kdc .
+# docker build -t altinityinfra/kerberos-kdc .
FROM centos:6
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*
diff --git a/docker/test/integration/mysql_golang_client/Dockerfile b/docker/test/integration/mysql_golang_client/Dockerfile
index 5281f786ae2d..52be68126e47 100644
--- a/docker/test/integration/mysql_golang_client/Dockerfile
+++ b/docker/test/integration/mysql_golang_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-golang-client .
+# docker build -t altinityinfra/mysql-golang-client .
# MySQL golang client docker container
FROM golang:1.17
diff --git a/docker/test/integration/mysql_java_client/Dockerfile b/docker/test/integration/mysql_java_client/Dockerfile
index 38fefac070e7..5826ee77d501 100644
--- a/docker/test/integration/mysql_java_client/Dockerfile
+++ b/docker/test/integration/mysql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-java-client .
+# docker build -t altinityinfra/mysql-java-client .
# MySQL Java client docker container
FROM openjdk:8-jdk-alpine
diff --git a/docker/test/integration/mysql_js_client/Dockerfile b/docker/test/integration/mysql_js_client/Dockerfile
index 4c9df10ace1c..2b821f243234 100644
--- a/docker/test/integration/mysql_js_client/Dockerfile
+++ b/docker/test/integration/mysql_js_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-js-client .
+# docker build -t altinityinfra/mysql-js-client .
# MySQL JavaScript client docker container
FROM node:16.14.2
diff --git a/docker/test/integration/mysql_php_client/Dockerfile b/docker/test/integration/mysql_php_client/Dockerfile
index 0e11ae023e63..b060e93f70a3 100644
--- a/docker/test/integration/mysql_php_client/Dockerfile
+++ b/docker/test/integration/mysql_php_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-php-client .
+# docker build -t altinityinfra/mysql-php-client .
# MySQL PHP client docker container
FROM php:8-cli-alpine
diff --git a/docker/test/integration/postgresql_java_client/Dockerfile b/docker/test/integration/postgresql_java_client/Dockerfile
index c5583085ef37..5a7458cc1d2f 100644
--- a/docker/test/integration/postgresql_java_client/Dockerfile
+++ b/docker/test/integration/postgresql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/postgresql-java-client .
+# docker build -t altinityinfra/postgresql-java-client .
# PostgreSQL Java client docker container
FROM ubuntu:18.04
diff --git a/docker/test/integration/resolver/Dockerfile b/docker/test/integration/resolver/Dockerfile
index 423faf835ae1..1f639bb2793d 100644
--- a/docker/test/integration/resolver/Dockerfile
+++ b/docker/test/integration/resolver/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/python-bottle .
+# docker build -t altinityinfra/python-bottle .
# Helper docker container to run python bottle apps
# python cgi module is dropped in 3.13 - pin to 3.12
diff --git a/docker/test/integration/resolver/requirements.txt b/docker/test/integration/resolver/requirements.txt
index fbf852953296..314b112319b3 100644
--- a/docker/test/integration/resolver/requirements.txt
+++ b/docker/test/integration/resolver/requirements.txt
@@ -1,6 +1,6 @@
-bottle==0.12.25
-packaging==24.1
-pip==23.2.1
-pipdeptree==2.23.0
-setuptools==69.0.3
-wheel==0.42.0
+bottle~=0.13
+packaging~=24.1
+pip~=23.2.1
+pipdeptree~=2.23.0
+setuptools~=69.0.3
+wheel~=0.42.0
diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile
index 469f691bebb4..3e13cafa4658 100644
--- a/docker/test/integration/runner/Dockerfile
+++ b/docker/test/integration/runner/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-tests-runner .
+# docker build -t altinityinfra/integration-tests-runner .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -85,7 +85,7 @@ COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY misc/ /misc/
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
# Same options as in test/base/Dockerfile
diff --git a/docker/test/integration/runner/dockerd-entrypoint.sh b/docker/test/integration/runner/dockerd-entrypoint.sh
index 63087d9d4c8c..6863ad7dd181 100755
--- a/docker/test/integration/runner/dockerd-entrypoint.sh
+++ b/docker/test/integration/runner/dockerd-entrypoint.sh
@@ -4,12 +4,12 @@ set -e
mkdir -p /etc/docker/
echo '{
"ipv6": true,
- "fixed-cidr-v6": "fd00::/8",
+ "fixed-cidr-v6": "2001:db8:1::/64",
"ip-forward": true,
"log-level": "debug",
"storage-driver": "overlay2",
- "insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
- "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
+ "insecure-registries" : ["65.108.242.32:5000"],
+ "registry-mirrors" : ["http://65.108.242.32:5000"]
}' | dd of=/etc/docker/daemon.json 2>/dev/null
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
diff --git a/docker/test/integration/s3_proxy/Dockerfile b/docker/test/integration/s3_proxy/Dockerfile
index 5858218e4e4c..df8d8f00f216 100644
--- a/docker/test/integration/s3_proxy/Dockerfile
+++ b/docker/test/integration/s3_proxy/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/s3-proxy .
+# docker build -t altinityinfra/s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh
diff --git a/docker/test/keeper-jepsen/Dockerfile b/docker/test/keeper-jepsen/Dockerfile
index 3c5d0a6ecb42..d3080a526711 100644
--- a/docker/test/keeper-jepsen/Dockerfile
+++ b/docker/test/keeper-jepsen/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/keeper-jepsen-test .
+# docker build -t altinityinfra/keeper-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/docker/test/libfuzzer/Dockerfile b/docker/test/libfuzzer/Dockerfile
index 46e305c90ab4..157078d0f7f4 100644
--- a/docker/test/libfuzzer/Dockerfile
+++ b/docker/test/libfuzzer/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/libfuzzer .
+# docker build -t altinityinfra/libfuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -35,5 +35,5 @@ RUN pip3 install --no-cache-dir -r /requirements.txt
SHELL ["/bin/bash", "-c"]
-# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer
+# docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/libfuzzer
diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile
index f71392752826..4e80c729eeca 100644
--- a/docker/test/performance-comparison/Dockerfile
+++ b/docker/test/performance-comparison/Dockerfile
@@ -1,7 +1,7 @@
-# docker build -t clickhouse/performance-comparison .
+# docker build -t altinityinfra/performance-comparison .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
@@ -41,9 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt
COPY run.sh /
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
CMD ["bash", "/run.sh"]
-# docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison
+# docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> altinityinfra/performance-comparison
diff --git a/docker/test/server-jepsen/Dockerfile b/docker/test/server-jepsen/Dockerfile
index fd70fc457020..5207f31b953f 100644
--- a/docker/test/server-jepsen/Dockerfile
+++ b/docker/test/server-jepsen/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/server-jepsen-test .
+# docker build -t altinityinfra/server-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/docker/test/sqlancer/Dockerfile b/docker/test/sqlancer/Dockerfile
index 9a48bf6b8d3c..980dcfba928c 100644
--- a/docker/test/sqlancer/Dockerfile
+++ b/docker/test/sqlancer/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/sqlancer-test .
+# docker build -t altinityinfra/sqlancer-test .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
diff --git a/docker/test/sqllogic/Dockerfile b/docker/test/sqllogic/Dockerfile
index 0d21a2da44ee..767e5eecfa84 100644
--- a/docker/test/sqllogic/Dockerfile
+++ b/docker/test/sqllogic/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/sqllogic-test .
+# docker build -t altinityinfra/sqllogic-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/sqltest/Dockerfile b/docker/test/sqltest/Dockerfile
index b805bb03c2b0..e21cb2d7febb 100644
--- a/docker/test/sqltest/Dockerfile
+++ b/docker/test/sqltest/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/sqltest .
+# docker build -t altinityinfra/sqltest .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile
index 9aa936cb069e..a3e2163b2731 100644
--- a/docker/test/stateful/Dockerfile
+++ b/docker/test/stateful/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #47031
-# docker build -t clickhouse/stateful-test .
+# docker build -t altinityinfra/stateful-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateless-test:$FROM_TAG
+FROM altinityinfra/stateless-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile
index 8d9d683bbb92..2d99b850dba5 100644
--- a/docker/test/stateless/Dockerfile
+++ b/docker/test/stateless/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/stateless-test .
+# docker build -t altinityinfra/stateless-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile
index ecb98a4e3eda..4a1979a1c253 100644
--- a/docker/test/stress/Dockerfile
+++ b/docker/test/stress/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/stress-test .
+# docker build -t altinityinfra/stress-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateful-test:$FROM_TAG
+FROM altinityinfra/stateful-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/docker/test/stress/README.md b/docker/test/stress/README.md
index fe73555fbd23..3d0fa2c9f467 100644
--- a/docker/test/stress/README.md
+++ b/docker/test/stress/README.md
@@ -6,7 +6,7 @@ Usage:
```
$ ls $HOME/someclickhouse
clickhouse-client_18.14.9_all.deb clickhouse-common-static_18.14.9_amd64.deb clickhouse-server_18.14.9_all.deb
-$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output clickhouse/stress-test
+$ docker run --volume=$HOME/someclickhouse:/package_folder --volume=$HOME/test_output:/test_output altinityinfra/stress-test
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 14442 files and directories currently installed.)
...
diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile
index 9f4b86aa0ca7..adc72011bd1d 100644
--- a/docker/test/unit/Dockerfile
+++ b/docker/test/unit/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/unit-test .
+# docker build -t altinityinfra/unit-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
COPY run.sh /
RUN chmod +x run.sh
diff --git a/docker/test/upgrade/Dockerfile b/docker/test/upgrade/Dockerfile
new file mode 100644
index 000000000000..c66868c2a046
--- /dev/null
+++ b/docker/test/upgrade/Dockerfile
@@ -0,0 +1,29 @@
+# rebuild in #33610
+# docker build -t altinityinfra/upgrade-check .
+ARG FROM_TAG=latest
+FROM altinityinfra/stateful-test:$FROM_TAG
+
+RUN apt-get update -y \
+ && env DEBIAN_FRONTEND=noninteractive \
+ apt-get install --yes --no-install-recommends \
+ bash \
+ tzdata \
+ parallel \
+ expect \
+ python3 \
+ python3-lxml \
+ python3-termcolor \
+ python3-requests \
+ curl \
+ sudo \
+ openssl \
+ netcat-openbsd \
+ brotli \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
+
+COPY run.sh /
+
+ENV EXPORT_S3_STORAGE_POLICIES=1
+
+CMD ["/bin/bash", "/run.sh"]
diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile
index edc133e592fe..3fd757a06234 100644
--- a/docker/test/util/Dockerfile
+++ b/docker/test/util/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-util .
+# docker build -t altinityinfra/test-util .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -56,5 +56,5 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
-COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:0de95b3aeb28 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
diff --git a/packages/clickhouse-client.yaml b/packages/clickhouse-client.yaml
index 34b42d92adfe..c8469b1744c5 100644
--- a/packages/clickhouse-client.yaml
+++ b/packages/clickhouse-client.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/packages/clickhouse-common-static-dbg.yaml b/packages/clickhouse-common-static-dbg.yaml
index 74b7fa8381bc..c656fb372a0b 100644
--- a/packages/clickhouse-common-static-dbg.yaml
+++ b/packages/clickhouse-common-static-dbg.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml
index db330f808e15..cf7941eb46b8 100644
--- a/packages/clickhouse-common-static.yaml
+++ b/packages/clickhouse-common-static.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/packages/clickhouse-keeper-dbg.yaml b/packages/clickhouse-keeper-dbg.yaml
index 28d53b39518d..d992eaf2375f 100644
--- a/packages/clickhouse-keeper-dbg.yaml
+++ b/packages/clickhouse-keeper-dbg.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml
index 9dad5382c082..e6a0d18d9242 100644
--- a/packages/clickhouse-keeper.yaml
+++ b/packages/clickhouse-keeper.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml
index dc183ead1020..d1d36e4a4ba5 100644
--- a/packages/clickhouse-server.yaml
+++ b/packages/clickhouse-server.yaml
@@ -11,12 +11,12 @@ description: |
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
-vendor: "ClickHouse Inc."
-homepage: "https://clickhouse.com"
+vendor: "Altinity Inc."
+homepage: "https://altinity.com"
license: "Apache"
section: "database"
priority: "optional"
-maintainer: "ClickHouse Dev Team "
+maintainer: "Altinity Server Dev Team "
deb:
fields:
Source: clickhouse
diff --git a/programs/server/binary.html b/programs/server/binary.html
index 5f2ccafbfdd2..314db61efaa6 100644
--- a/programs/server/binary.html
+++ b/programs/server/binary.html
@@ -2,7 +2,7 @@
-
+
ClickHouse Binary Viewer